diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -592,9 +592,10 @@
   defm vse : RISCVUSStore;
   defm vlse: RISCVSLoad;
   defm vsse: RISCVSStore;
-  defm vloxe: RISCVILoad;
-  defm vsoxe: RISCVIStore;
-  defm vsuxe: RISCVIStore;
+  defm vluxei : RISCVILoad;
+  defm vloxei : RISCVILoad;
+  defm vsoxei : RISCVIStore;
+  defm vsuxei : RISCVIStore;
 
   defm vamoswap : RISCVAMO;
   defm vamoadd : RISCVAMO;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2617,6 +2617,7 @@
 
 // Vector Indexed Loads and Stores
 foreach eew = EEWList in {
+  defm PseudoVLUXEI # eew : VPseudoILoad;
   defm PseudoVLOXEI # eew : VPseudoILoad;
   defm PseudoVSOXEI # eew : VPseudoIStore;
   defm PseudoVSUXEI # eew : VPseudoIStore;
@@ -3182,15 +3183,19 @@
     defvar elmul = !cast<LMULInfo>("V_" # elmul_str);
     defvar idx_vti = !cast<VTypeInfo>("VI" # eew # elmul_str);
 
-    defm : VPatILoad<"int_riscv_vloxe",
+    defm : VPatILoad<"int_riscv_vluxei",
+                     "PseudoVLUXEI"#eew,
+                     vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
+                     vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
+    defm : VPatILoad<"int_riscv_vloxei",
                      "PseudoVLOXEI"#eew,
                      vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
                      vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-    defm : VPatIStore<"int_riscv_vsoxe",
+    defm : VPatIStore<"int_riscv_vsoxei",
                      "PseudoVSOXEI"#eew,
                      vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
                      vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-    defm : VPatIStore<"int_riscv_vsuxe",
+    defm : VPatIStore<"int_riscv_vsuxei",
                      "PseudoVSUXEI"#eew,
                      vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
                      vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
@@ -0,0 +1,4174 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
+    <vscale x 1 x i8>* %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
+    <vscale x 2 x i8>* %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
+    <vscale x 4 x i8>* %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
+    <vscale x 8 x i8>* %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
+  <vscale x 16 x i8>*,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
+    <vscale x 16 x i8>* %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i32>,
+  i32);
+
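+; A minimal sketch (not part of the autogenerated checks) of how the unordered
+; intrinsic added by this patch is exercised; the names below simply mirror the
+; ordered tests in this file, and the real vluxei coverage is presumably a
+; sibling autogenerated test file not shown in this diff:
+;
+;   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
+;          <vscale x 1 x i8>* %base, <vscale x 1 x i32> %index, i32 %vl)
+;
+; This selects through PseudoVLUXEI32 to "vluxei32.v vd, (rs1), vs2", which
+; differs from vloxei32.v only in that the indexed element accesses may be
+; performed in any order.
+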
+define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: 
vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i32( + * %0, + %1, + i32 %2) + + ret 
%a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i32( + *, + , + i32); + +define 
@intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f16.nxv16i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; 
CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + 
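+; Note on the masked nxv16 cases above that read their index operand back from
+; memory: this appears to be a calling-convention effect rather than part of
+; the indexed-load lowering. With the merge operand occupying the first vector
+; argument registers, an index that needs a full register group is passed
+; indirectly, so it is reloaded at VLMAX first:
+;
+;   vsetvli a3, zero, e32,m8,ta,mu    # rs1=x0, rd!=x0 sets vl to VLMAX
+;   vle32.v v8, (a1)                  # a1 holds the address of the index
+;
+; before the masked vloxei32.v consumes it from v8 as vs2.
+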
+declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f32.nxv16i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i32( + *, + , + i32); + +define 
@intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i32( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i8.nxv1i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i8.nxv16i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i8.nxv32i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( + , + *, + , 
+ , + i32); + +define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i16.nxv16i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i16.nxv32i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu 
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( + , + *, + , 
+ , + i32); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu 
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( + , + *, + , 
+ , + i32); + +define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv64i8.nxv64i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: 
+ %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vloxei.nxv2i32.nxv2i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, 
zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, i32 %2) 
nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, 
(a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i8( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * 
%1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll @@ -0,0 +1,5954 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m1,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( + , + *, + , + , + i64); + 
+define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + 
%a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( + , + *, + , + , + i64); + +define 
@intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( + , + *, + , + , + i64); + +define 
@intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i8.nxv16i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr 
zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vloxei.nxv8i16.nxv8i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; 
CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; 
CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i64.nxv4i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i64.nxv8i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32( + %0, + * %1, + %2, 
+ %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f16.nxv16i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: 
vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f32.nxv16i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( + %0, + * %1, 
+ %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i32( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i8.nxv1i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 
%4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i8.nxv16i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i16( + 
* %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i8.nxv32i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i16( + *, + , + i64); + +define 
@intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i16.nxv16i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i16.nxv32i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, 
e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( + * %0, + %1, + i64 %2) 
+ + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i64.nxv4i16( + *, + , + i64); + +define 
@intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i64.nxv8i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i16( + * %0, + %1, + i64 %2) + + ret 
%a +} + +declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( + *, + , + i64); + +define 
@intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i8( 
+ * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv64i8.nxv64i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( 
+ %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i64.nxv4i8( + *, + , + 
i64); + +define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i64.nxv8i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( + 
, + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i8( + *, + , + i64); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+  <vscale x 4 x double>*,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+    <vscale x 4 x double>* %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+  <vscale x 8 x double>*,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+    <vscale x 8 x double>* %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
@@ -0,0 +1,4174 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vluxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
+    <vscale x 1 x i8>* %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+;
CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i8.nxv16i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( + , + *, + , + , + i32); + +define 
@intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i16.nxv16i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i32.nxv16i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: 
vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f16.nxv16i32( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( + , + *, + , + , + i32); + 
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
+  <vscale x 1 x float>*,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
+    <vscale x 1 x float>* %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
+  <vscale x 2 x float>*,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
+    <vscale x 2 x float>* %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
+  <vscale x 4 x float>*,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
+    <vscale x 4 x float>* %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
+  <vscale x 8 x float>*,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
+    <vscale x 8 x float>* %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
+  <vscale x 16 x float>*,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
+    <vscale x 16 x float>* %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
+  <vscale x 1 x double>*,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
+    <vscale x 1 x double>* %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
+  <vscale x 2 x double>*,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
+    <vscale x 2 x double>* %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
+  <vscale x 4 x double>*,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
+    <vscale x 4 x double>* %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
+  <vscale x 8 x double>*,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
+    <vscale x 8 x double>* %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vluxei16.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
+    <vscale x 1 x i8>* %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vluxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vluxei16.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
+    <vscale x 2 x i8>* %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare
@llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i8.nxv32i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( + , + 
*, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f16.nxv16i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( + , + 
*, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, 
i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv64i8.nxv64i8( 
+ * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, 
i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i16.nxv32i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu 
+; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i32.nxv16i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( + , + *, + , + , + i32); + +define 
@intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i8( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(* %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
+    <vscale x 8 x double>* %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
@@ -0,0 +1,5954 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vluxei64.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8>* %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vluxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vluxei64.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8>* %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vluxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vluxei64.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
+    <vscale x 4 x i8>* %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1>
%3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m1,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: 
vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 
%4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind 
{ +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei64.v 
v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + 
ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; 
CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vluxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), 
v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i8.nxv2i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i8.nxv16i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(* %0, 
%1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i16.nxv16i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i32( + * 
%0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i32.nxv16i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i64.nxv1i32( + *, + , + i64); + +define 
@intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i64.nxv2i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i64.nxv4i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i64.nxv8i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, 
(a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare 
+declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
+  <vscale x 16 x half>*,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
+    <vscale x 16 x half>* %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
+  <vscale x 1 x float>*,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
+    <vscale x 1 x float>* %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
+  <vscale x 2 x float>*,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
+    <vscale x 2 x float>* %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vluxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
+  <vscale x 4 x float>*,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32>
%1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f32.nxv16i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i32( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vluxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i8.nxv1i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i16( + * %0, + %1, + i64 %2) + 
+ ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i8.nxv2i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i8.nxv32i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, 
* %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i64.nxv1i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i64.nxv2i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i64.nxv4i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i64.nxv8i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( + , + *, + , + , + i64); + +define 
@intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f16.nxv16i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e16,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( + , + *, + , + , + i64); 
+ +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vluxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( + *, + , + i64); + 
+define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv64i8.nxv64i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i16.nxv32i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( + 
, + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: 
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
+    <vscale x 8 x i32>* %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
+    <vscale x 16 x i32>* %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vle8.v v26, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
+    <vscale x 1 x i64>* %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
+    <vscale x 2 x i64>* %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
+    <vscale x 4 x i64>* %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
+    <vscale x 8 x i64>* %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
+  <vscale x 1 x half>*,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
+    <vscale x 1 x half>* %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
+  <vscale x 2 x half>*,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vluxei8.v v16, (a0), v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
+    <vscale x 2 x half>* %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half>
@intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vluxei8.v 
v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vluxei.nxv8f32.nxv8i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i8( + *, + , + i64); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(* %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vluxei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll +++ /dev/null @@ -1,41 +0,0 @@ -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f,+d -verify-machineinstrs \ -; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vloxe.nxv1i8.nxv1i32( - *, - , - i32); - -define @intrinsic_vloxe_v_nxv1i8_nxv1i8_nxv1i32(* %0, %1, i32 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vloxe_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vloxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vloxe.nxv1i8.nxv1i32( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxe.mask.nxv1i8.nxv1i32( - , - *, - , - , - i32); - -define @intrinsic_vloxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vloxe_mask_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vloxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vloxe.mask.nxv1i8.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll deleted 
file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll +++ /dev/null @@ -1,5361 +0,0 @@ -; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \ -; RUN: --riscv-no-aliases < %s | FileCheck %s -declare @llvm.riscv.vlxe.nxv1i8.nxv1i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i8.nxv2i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i8.nxv4i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i8.nxv8i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - 
i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i16.nxv1i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i16.nxv2i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i16.nxv4i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i16.nxv8i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i32.nxv1i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i64 %2) nounwind { 
-entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i32.nxv2i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i32.nxv4i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i32.nxv8i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i64.nxv1i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a 
= call @llvm.riscv.vlxe.nxv1i64.nxv1i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i64.nxv2i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i64.nxv2i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i64.nxv4i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i64.nxv4i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i64.nxv8i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i64.nxv8i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f16.nxv1i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64( - , - *, - , - , - i64); - -define 
@intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f16.nxv2i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f16.nxv4i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f16.nxv8i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f32.nxv1i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f32.nxv2i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f32.nxv4i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f32.nxv8i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f64.nxv1i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f64.nxv1i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - 
%3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f64.nxv2i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f64.nxv2i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f64.nxv4i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f64.nxv4i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f64.nxv8i64( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f64.nxv8i64( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i8.nxv1i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i8.nxv2i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32(* %0, %1, i64 %2) nounwind { -entry: -; 
CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i8.nxv4i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i8.nxv8i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i8.nxv16i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i16.nxv1i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i32( 
- * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i16.nxv2i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i16.nxv4i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i16.nxv8i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i16.nxv16i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32( - , - *, - , - , - i64); - -define 
@intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i32.nxv1i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i32.nxv2i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i32.nxv4i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i32.nxv8i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i32.nxv16i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i64.nxv1i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i64.nxv1i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i64.nxv2i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i64.nxv2i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i64.nxv4i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i64.nxv4i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32( - 
%0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i64.nxv8i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i64.nxv8i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f16.nxv1i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f16.nxv2i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f16.nxv4i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f16.nxv8i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32(* %0, 
%1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16f16.nxv16i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f32.nxv1i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f32.nxv2i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f32.nxv4i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: 
vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f32.nxv8i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16f32.nxv16i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f64.nxv1i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f64.nxv1i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f64.nxv2i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f64.nxv2i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vlxe.mask.nxv2f64.nxv2i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f64.nxv4i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f64.nxv4i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f64.nxv8i32( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i32(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f64.nxv8i32( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i8.nxv1i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i8.nxv2i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: 
intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i8.nxv4i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i8.nxv8i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i8.nxv16i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv32i8.nxv32i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv32i8.nxv32i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call 
@llvm.riscv.vlxe.mask.nxv32i8.nxv32i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i16.nxv1i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i16.nxv2i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i16.nxv4i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i16.nxv8i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i16.nxv16i16( - *, - , - i64); - -define 
@intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv32i16.nxv32i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv32i16.nxv32i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i32.nxv1i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i32.nxv2i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i32.nxv4i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: 
intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i32.nxv8i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i32.nxv16i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i64.nxv1i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i64.nxv1i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i64.nxv2i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = 
call @llvm.riscv.vlxe.nxv2i64.nxv2i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i64.nxv4i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i64.nxv4i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i64.nxv8i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i64.nxv8i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f16.nxv1i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f16.nxv2i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16( - , - *, - , - , - i64); - -define 
@intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f16.nxv4i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f16.nxv8i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16f16.nxv16i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv32f16.nxv32i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv32f16.nxv32i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: 
intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f32.nxv1i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f32.nxv2i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f32.nxv4i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f32.nxv8i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call 
@llvm.riscv.vlxe.mask.nxv8f32.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16f32.nxv16i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f64.nxv1i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f64.nxv1i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f64.nxv2i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f64.nxv2i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f64.nxv4i16( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f64.nxv4i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f64.nxv8i16( - *, - , - i64); - 
-define @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i16(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f64.nxv8i16( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i8.nxv1i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i8.nxv2i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i8.nxv4i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i8.nxv8i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - 
%a = call @llvm.riscv.vlxe.nxv8i8.nxv8i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i8.nxv16i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv32i8.nxv32i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv32i8.nxv32i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv64i8.nxv64i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv64i8.nxv64i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i16.nxv1i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8( - , - *, - , - , - i64); - -define 
@intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i16.nxv2i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i16.nxv4i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i16.nxv8i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i16.nxv16i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv32i16.nxv32i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv32i16.nxv32i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i32.nxv1i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i32.nxv2i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i32.nxv4i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare 
@llvm.riscv.vlxe.nxv8i32.nxv8i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16i32.nxv16i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1i64.nxv1i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1i64.nxv1i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2i64.nxv2i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2i64.nxv2i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4i64.nxv4i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i8 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4i64.nxv4i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8i64.nxv8i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8i64.nxv8i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f16.nxv1i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f16.nxv2i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f16.nxv4i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vlxe.mask.nxv4f16.nxv4i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f16.nxv8i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16f16.nxv16i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv32f16.nxv32i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv32f16.nxv32i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f32.nxv1i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; 
CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f32.nxv2i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f32.nxv4i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f32.nxv8i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv16f32.nxv16i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call 
@llvm.riscv.vlxe.mask.nxv16f32.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv1f64.nxv1i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv1f64.nxv1i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv2f64.nxv2i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv2f64.nxv2i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv4f64.nxv4i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv4f64.nxv4i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vlxe.nxv8f64.nxv8i8( - *, - , - i64); - -define @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i8(* %0, %1, i64 %2) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - %a = call @llvm.riscv.vlxe.nxv8f64.nxv8i8( - * %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8( - , - *, - , - , - i64); - -define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll new file mode 100644 
--- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll @@ -0,0 +1,4398 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, 
(a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( + , + *, + , + , + i32); + +define void 
@intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; 
CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( + , + *, + , + , + i32); + +define void 
@intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.nxv4i8.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( + , + *, + , + , + 
i32); + +define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( + , + *, + , + i32); + 
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  <vscale x 16 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    <vscale x 16 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  <vscale x 32 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    <vscale x 32 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( + , + *, + , + i32); + +define 
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    <vscale x 16 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  <vscale x 16 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    <vscale x 16 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  <vscale x 32 x i16>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    <vscale x 32 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void 
@llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.nxv32i8.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + 
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( + , + *, + , + i32); + +define void 
@intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( + , + *, + , + 
, + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare 
void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * 
%1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( + , + *, + , + i32); + +define void 
@intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( + , + *, + , + 
, + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
@@ -0,0 +1,6278 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsoxei64.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsoxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsoxei64.v v16, (a0), v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsoxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsoxei64.v v16, (a0), v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsoxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, 
%2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( + , + *, + , + i64); + +define 
void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, 
e32,m4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: 
vsoxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, 
(a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) 
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: 
jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16( %0, * 
%1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  <vscale x 16 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    <vscale x 16 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  <vscale x 32 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    <vscale x 32 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  <vscale x 16 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    <vscale x 16 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  <vscale x 32 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsoxei16.v v16, (a0), v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    <vscale x 32 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+;
CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( + , + *, + , + , + i64); + +define void 
@intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.nxv2f16.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( + , + *, + , + , + i64); + +define void 
@intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void 
+} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsoxei.nxv1f64.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei16.v v16, 
(a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, 
+ i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( + , + *, + , + , + i64); + +define void 
@intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.nxv2i32.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( + , + *, + , + , + i64); + 
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( + , + *, + , + i64); + +define void 
@intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( + , + *, + , + , + i64); + 
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + 
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsoxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, 
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
+    %0,
+    * %1,
+    %2,
+    %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
+  ,
+  *,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
+    %0,
+    * %1,
+    %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
+  ,
+  *,
+  ,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
+    %0,
+    * %1,
+    %2,
+    %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
+  ,
+  *,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
+    %0,
+    * %1,
+    %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
+  ,
+  *,
+  ,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
+    %0,
+    * %1,
+    %2,
+    %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
+  ,
+  *,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
+    %0,
+    * %1,
+    %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
+  ,
+  *,
+  ,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
+    %0,
+    * %1,
+    %2,
+    %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+  ,
+  *,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+    %0,
+    * %1,
+    %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+  ,
+  *,
+  ,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+    %0,
+    * %1,
+    %2,
+    %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+  ,
+  *,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+    %0,
+    * %1,
+    %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+  ,
+  *,
+  ,
+  ,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+    %0,
+    * %1,
+    %2,
+    %3,
+    i64 %4)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
+++ /dev/null
@@ -1,3445 +0,0 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
-  ,
-  *,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
-    %0,
-    * %1,
-    %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
-  ,
-  *,
-  ,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
-    %0,
-    * %1,
-    %2,
-    %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
-  ,
-  *,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
-    %0,
-    * %1,
-    %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
-  ,
-  *,
-  ,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
-    %0,
-    * %1,
-    %2,
-    %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
-  ,
-  *,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
-    %0,
-    * %1,
-    %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
-  ,
-  *,
-  ,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
-    %0,
-    * %1,
-    %2,
-    %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
-  ,
-  *,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
-    %0,
-    * %1,
-    %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
-  ,
-  *,
-  ,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
-    %0,
-    * %1,
-    %2,
-    %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
-  ,
-  *,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
-    %0,
-    * %1,
-    %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
-  ,
-  *,
-  ,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
-    %0,
-    * %1,
-    %2,
-    %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
-  ,
-  *,
-  ,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v
{{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i16.nxv1i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i16.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i16.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i16.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i16.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i32.nxv1i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i32.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i32.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32 -; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i32.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i32.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f16.nxv1i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f16.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: 
intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f16.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f16.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f16.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f32.nxv1i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, 
i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f32.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f32.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f32.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f32.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i16( - , - *, - , - i32); - -define void 
@intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i8.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i8.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i8.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i8.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i16( - , - *, - , - i32); - -define void 
@intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i8.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32i8.nxv32i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i16.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i16.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i16( 
- , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i16.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i16.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i16.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32i16.nxv32i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16( - %0, - * %1, - %2, - %3, - i32 %4) - - 
ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i32.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i32.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i32.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i32.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16( - %0, - * %1, - 
%2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i32.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f16.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f16.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f16.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void 
@llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f16.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f16.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32f16.nxv32i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f32.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; 
CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f32.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f32.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f32.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f32.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i8( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i8.nxv1i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i8( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i8.nxv2i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i8( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i8.nxv4i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i8( - , - *, - , - i32); - -define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i8.nxv8i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8( - , - *, - , - , - i32); - -define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsuxei8.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i8> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i8> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i8> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i8> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  i32);
-
-define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i8> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
+++ /dev/null
@@ -1,5629 +0,0 @@
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  i64);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv1i8.nxv1i64(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i64(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void
@llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i32( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f32.nxv4i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i32( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f32.nxv8i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i32( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f32.nxv16i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i32( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f64.nxv1i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i32( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei32.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i32( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f64.nxv2i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i32( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i32( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f64.nxv4i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i32( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i32( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f64.nxv8i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i32( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i8.nxv1i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: 
vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i8.nxv2i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i8.nxv4i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i8.nxv8i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i8.nxv16i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsuxei16.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32i8.nxv32i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i16.nxv1i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i16.nxv2i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i16.nxv4i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i16.nxv8i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i16.nxv16i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32i16.nxv32i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i32.nxv1i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: 
intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i32.nxv2i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i32.nxv4i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i32.nxv8i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i32.nxv16i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 
%4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i64.nxv1i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i64.nxv2i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i64.nxv4i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i64.nxv8i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i16( - , - *, - , - , - i64); - -define void 
@intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f16.nxv1i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f16.nxv2i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f16.nxv4i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f16.nxv8i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16( - , - *, - , - 
, - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f16.nxv16i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32f16.nxv32i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f32.nxv1i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f32.nxv2i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - 
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f32.nxv4i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f32.nxv8i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f32.nxv16i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f64.nxv1i16( - %0, 
- * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f64.nxv2i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f64.nxv4i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i16( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f64.nxv8i16( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i16( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i16( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void 
@llvm.riscv.vsuxe.nxv1i8.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i8.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i8.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i8.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i8.nxv16i8( - %0, - * %1, - %2, - i64 
%3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32i8.nxv32i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv64i8.nxv64i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv64i8.nxv64i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i16.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i16.nxv2i8( - %0, - * %1, - %2, - i64 
%3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i16.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i16.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i16.nxv16i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32i16.nxv32i8( - %0, - * %1, 
- %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i32.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i32.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i32.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i32.nxv8i8( - %0, - * %1, - %2, 
- i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16i32.nxv16i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1i64.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2i64.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4i64.nxv4i8( - %0, - * %1, - 
%2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8i64.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f16.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f16.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f16.nxv4i8( - %0, - * %1, - %2, - i64 %3) 
- - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f16.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f16.nxv16i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv32f16.nxv32i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f32.nxv1i8( - %0, - 
* %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f32.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f32.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f32.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv16f32.nxv16i8( - %0, - * %1, - 
%2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv1f64.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv2f64.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv4f64.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsuxe.nxv8f64.nxv8i8( - %0, - * %1, - %2, - 
i64 %3) - - ret void -} - -declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll @@ -0,0 +1,4398 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32( %0, * 
%1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 
%4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; 
CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( + , + *, + , + , + i32); + +define void 
@intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + 
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( + , + *, + , + , + i32); + +define void 
@intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + 
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( + %0, + * %1, + %2, + i32 %3) + 
+ ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( + , + *, + , + i32); + +define void 
@intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i16>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  <vscale x 16 x i16>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    <vscale x 16 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  <vscale x 32 x i16>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    <vscale x 32 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i16>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i16>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i16>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i16> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void
@llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsuxei.nxv32f16.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei16.v 
v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret 
void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( 
+ %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsuxei.nxv1f16.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.nxv16f16.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8( + %0, + * %1, + %2, + 
i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + 
ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll @@ -0,0 +1,6278 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( + , + *, + , + , + i64); + +define void 
@intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.nxv4i16.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( + %0, + * %1, + %2, + i64 %3) 
+ + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, 
e16,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: 
vsuxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), 
v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: 
vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, 
e16,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32( 
%0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( + , + *, + , + , + i64); + +define void 
@intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + 
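In every declaration in this file, the scalable-vector operand types follow mechanically from the two type suffixes in the intrinsic name (data element type first, then index element type). As an annotated sketch of the masked variant just above — operand roles in the comments; the <vscale x N x T> spellings are inferred from the name rather than quoted from the patch:

declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
  <vscale x 1 x double>,   ; data vector to scatter
  <vscale x 1 x double>*,  ; scalar base address
  <vscale x 1 x i32>,      ; per-element byte offsets
  <vscale x 1 x i1>,       ; element mask (expected in v0)
  i64);                    ; vector length (vl)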
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i32>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsuxei32.v v16, (a0), v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i32> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i32>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsuxei32.v v16, (a0), v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i32> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i32>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsuxei32.v v16, (a0), v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i32> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsuxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i16>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsuxei16.v v16, (a0), v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i16> %2,
+ i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( + , + *, + , + i64); + +define void 
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    <vscale x 16 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  <vscale x 32 x i16>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    <vscale x 32 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i16>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i16>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void
@llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( + , + *, + , + i64); + 
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void 
@llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( + 
%0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; 
CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, 
a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr 
zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( + %0, 
+ * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void 
+} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( + %0, 
+ * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( + , + *, + , + i64); + 
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( + , 
+ *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  <vscale x 16 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    <vscale x 16 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  <vscale x 32 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    <vscale x 32 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  <vscale x 16 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    <vscale x 16 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i8>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll
+++ /dev/null
@@ -1,3445 +0,0 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare void @llvm.riscv.vsoxe.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  i32);
-
-define void @intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsoxe.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: 
intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i8.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i8.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i8.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i8.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i8.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i8.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i8.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i8_nxv16i8_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i8.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i8.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: 
intrinsic_vsoxe_mask_v_nxv16i8_nxv16i8_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i8.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i16.nxv1i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i16.nxv1i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i16.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i16_nxv2i16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i16.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i16.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i16_nxv2i16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i16.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i16.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i16_nxv4i16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i16.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i16.nxv4i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i16_nxv4i16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i16.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i16.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i16_nxv8i16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i16.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i16.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind 
{ -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i16_nxv8i16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i16.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i16.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i16_nxv16i16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i16.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i16.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i16_nxv16i16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i16.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i32.nxv1i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i32.nxv1i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i32.nxv1i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i32.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i32.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i32.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i32.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i32.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i32.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i32.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i32( - , - *, - , - , - i32); - -define void 
@intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i32.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i32.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i32.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i32.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i32.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i32.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f16.nxv1i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f16.nxv1i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f16.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f16.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void 
@llvm.riscv.vsoxe.mask.nxv2f16.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f16.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f16.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f16.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f16.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16f16.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16f16_nxv16f16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16f16.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16f16.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16f16_nxv16f16_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16f16.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f32.nxv1i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f32.nxv1i32( - %0, - * %1, - 
%2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f32.nxv2i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f32.nxv2i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f32.nxv4i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f32.nxv4i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f32.nxv8i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f32.nxv8i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16f32.nxv16i32( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void 
@llvm.riscv.vsoxe.nxv16f32.nxv16i32( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16f32.nxv16i32( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16f32_nxv16f32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16f32.nxv16i32( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i8.nxv1i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i8.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i8.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i8.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i8.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i8.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i8.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void 
@llvm.riscv.vsoxe.nxv8i8.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i8.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i8.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i8.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i8_nxv16i8_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i8.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv32i8.nxv32i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv32i8.nxv32i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv32i8.nxv32i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv32i8_nxv32i8_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv32i8.nxv32i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i16.nxv1i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i16.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i16.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, 
(a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i16.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i16.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i16.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i16.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i16.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i16.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i16.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i16.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i16.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i16.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i16.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i16.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i16.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i16.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i16.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv32i16.nxv32i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv32i16.nxv32i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv32i16.nxv32i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv32i16.nxv32i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i32.nxv1i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i32.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i32.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i32_nxv1i32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i32.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i32.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i32.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i32.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i32_nxv2i32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i32.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i32.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i32.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i32.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: 
intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i32.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i32.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i32.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i32.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i32_nxv16i32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i32.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f16.nxv1i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f16.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f16.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f16.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f16.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, 
%2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f16.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f16.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f16.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16f16.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16f16.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16f16.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16f16_nxv16f16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16f16.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv32f16.nxv32i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv32f16.nxv32i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv32f16.nxv32i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv32f16_nxv32f16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv32f16.nxv32i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f32.nxv1i16( - , - *, - , - i32); - 
-define void @intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f32.nxv1i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f32.nxv2i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f32.nxv2i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f32.nxv4i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f32.nxv4i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f32.nxv8i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f32.nxv8i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void 
@llvm.riscv.vsoxe.nxv16f32.nxv16i16( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16f32.nxv16i16( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16f32.nxv16i16( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16f32_nxv16f32_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16f32.nxv16i16( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i8.nxv1i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i8.nxv1i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i8.nxv2i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i8.nxv2i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i8.nxv4i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i8.nxv4i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void 
@llvm.riscv.vsoxe.nxv8i8.nxv8i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i8.nxv8i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i8.nxv16i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i8.nxv16i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i8.nxv16i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i8.nxv16i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv32i8.nxv32i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv32i8.nxv32i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv32i8.nxv32i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv32i8.nxv32i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv64i8.nxv64i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv64i8.nxv64i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv64i8.nxv64i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv64i8.nxv64i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void 
@llvm.riscv.vsoxe.nxv1i16.nxv1i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i16.nxv1i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i16.nxv2i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i16.nxv2i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i16.nxv2i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i16_nxv2i16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i16.nxv2i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i16.nxv4i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i16.nxv4i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i16.nxv4i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i16_nxv4i16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i16.nxv4i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i16.nxv8i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i16.nxv8i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i16.nxv8i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i16_nxv8i16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i16.nxv8i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void 
@llvm.riscv.vsoxe.nxv16i16.nxv16i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i16.nxv16i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i16.nxv16i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i16_nxv16i16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i16.nxv16i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv32i16.nxv32i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv32i16.nxv32i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv32i16.nxv32i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv32i16_nxv32i16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv32i16.nxv32i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i32.nxv1i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i32_nxv1i32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i32.nxv1i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i32.nxv1i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i32_nxv1i32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i32.nxv1i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i32.nxv2i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i32_nxv2i32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i32.nxv2i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i32.nxv2i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i32_nxv2i32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i32.nxv2i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret 
void -} - -declare void @llvm.riscv.vsoxe.nxv4i32.nxv4i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i32.nxv4i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i32.nxv8i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i32.nxv8i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i32.nxv16i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i32.nxv16i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i32.nxv16i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i32.nxv16i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f16.nxv1i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f16.nxv1i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i8( - %0, - * %1, - %2, - %3, - i32 %4) - - 
ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f16.nxv2i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f16.nxv2i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f16.nxv4i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f16.nxv4i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f16.nxv8i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f16.nxv8i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16f16.nxv16i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16f16.nxv16i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16f16.nxv16i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16f16.nxv16i8( - %0, - * %1, - %2, - %3, - i32 %4) 
- - ret void -} - -declare void @llvm.riscv.vsoxe.nxv32f16.nxv32i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv32f16.nxv32i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv32f16.nxv32i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv32f16.nxv32i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f32.nxv1i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f32.nxv1i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f32.nxv2i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f32.nxv2i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i8( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f32.nxv4i8( - , - *, - , - i32); - -define void @intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f32.nxv4i8( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i8( - , - *, - , - , - i32); - -define void @intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i8( - %0, - * %1, - %2, - %3, - i32 
%4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.nxv8f32.nxv8i8(<vscale x 8 x float>, <vscale x 8 x float>*, <vscale x 8 x i8>, i32);
-
-define void @intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsoxe.nxv8f32.nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i8(<vscale x 8 x float>, <vscale x 8 x float>*, <vscale x 8 x i8>, <vscale x 8 x i1>, i32);
-
-define void @intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.nxv16f32.nxv16i8(<vscale x 16 x float>, <vscale x 16 x float>*, <vscale x 16 x i8>, i32);
-
-define void @intrinsic_vsoxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsoxe.nxv16f32.nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.mask.nxv16f32.nxv16i8(<vscale x 16 x float>, <vscale x 16 x float>*, <vscale x 16 x i8>, <vscale x 16 x i1>, i32);
-
-define void @intrinsic_vsoxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsoxe.mask.nxv16f32.nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4)
-
-  ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
+++ /dev/null
@@ -1,5629 +0,0 @@
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare void @llvm.riscv.vsoxe.nxv1i8.nxv1i64(<vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x i64>, i64);
-
-define void @intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsoxe.nxv1i8.nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64);
-
-define void @intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i8>*, <vscale x 2 x i64>, i64);
-
-define void @intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsoxe.nxv2i8.nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i8>*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64);
-
-define void
@intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i8.nxv4i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i8.nxv4i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i8.nxv8i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i8.nxv8i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i16.nxv1i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i16.nxv1i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i16.nxv2i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i16_nxv2i16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i16.nxv2i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i16.nxv2i64( - , - *, - , - , - i64); - -define void 
@intrinsic_vsoxe_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i16_nxv2i16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i16.nxv4i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i16_nxv4i16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i16.nxv4i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i16.nxv4i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i16_nxv4i16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i16.nxv8i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i16_nxv8i16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i16.nxv8i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i16.nxv8i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i16_nxv8i16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i32.nxv1i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i32_nxv1i32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i32.nxv1i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i32.nxv1i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i32_nxv1i32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i32.nxv2i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i32_nxv2i32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i32.nxv2i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i32.nxv2i64( - , - *, - , - 
, - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i32_nxv2i32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i32.nxv4i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i32.nxv4i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i32.nxv8i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i32.nxv8i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i64.nxv1i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i64.nxv1i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i64.nxv1i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i64.nxv2i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i64.nxv2i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void 
@llvm.riscv.vsoxe.mask.nxv2i64.nxv2i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i64.nxv4i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i64.nxv4i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i64.nxv4i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i64.nxv8i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i64.nxv8i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i64.nxv8i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f16.nxv1i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f16.nxv1i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f16.nxv2i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f16.nxv2i64( - %0, - * %1, - %2, - i64 %3) - - ret 
void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f16.nxv4i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f16.nxv4i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f16.nxv8i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f16.nxv8i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f32.nxv1i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f32.nxv1i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f32.nxv2i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f32.nxv2i64( - %0, - * %1, 
- %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f32.nxv4i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f32.nxv4i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f32.nxv8i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f32.nxv8i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f64.nxv1i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f64_nxv1f64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f64.nxv1i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f64.nxv1i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f64_nxv1f64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f64.nxv2i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f64_nxv2f64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void 
@llvm.riscv.vsoxe.nxv2f64.nxv2i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f64.nxv2i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f64_nxv2f64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f64.nxv4i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f64_nxv4f64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f64.nxv4i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f64.nxv4i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f64_nxv4f64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f64.nxv8i64( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f64_nxv8f64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f64.nxv8i64( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f64.nxv8i64( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f64_nxv8f64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsoxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i8.nxv1i32( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i8.nxv1i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i32( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i8_nxv1i8_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i8.nxv1i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i8.nxv2i32( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i8_nxv2i8_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call 
void @llvm.riscv.vsoxe.nxv2i8.nxv2i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i32( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i8_nxv2i8_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i8.nxv2i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i8.nxv4i32( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i8_nxv4i8_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i8.nxv4i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i32( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i8_nxv4i8_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i8.nxv4i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i8.nxv8i32( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i8_nxv8i8_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i8.nxv8i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i32( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i8_nxv8i8_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i8.nxv8i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i8.nxv16i32( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i8_nxv16i8_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i8.nxv16i32( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i8.nxv16i32( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i8_nxv16i8_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i8.nxv16i32( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i16.nxv1i32( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i16_nxv1i16_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void 
@llvm.riscv.vsoxe.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vsoxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
-  call void @llvm.riscv.vsoxe.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}

[... identical removed unmasked/masked vsoxe test pairs for the remaining i32-indexed types: nxv{2,4,8,16}i16, nxv{1,2,4,8,16}i32, nxv{1,2,4,8}i64, nxv{1,2,4,8,16}f16, nxv{1,2,4,8,16}f32, and nxv{1,2,4,8}f64, each checking the matching vsetvli e{16,32,64},LMUL,ta,mu and vsoxei32.v pattern ...]

-declare void @llvm.riscv.vsoxe.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK:       vsoxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsoxe.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}

[... identical removed pairs for the remaining i16-indexed types (nxv{2,4,8,16,32}i8, nxv{1,2,4,8,16,32}i16, nxv{1,2,4,8,16}i32, nxv{1,2,4,8}i64, nxv{1,2,4,8,16,32}f16, nxv{1,2,4,8,16}f32, nxv{1,2,4,8}f64, checked against vsoxei16.v) and for the i8-indexed types nxv{1,2,4,8,16,32,64}i8, nxv{1,2,4,8,16,32}i16, and nxv{1,2}i32, checked against vsoxei8.v ...]

-declare void @llvm.riscv.vsoxe.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
-  call void @llvm.riscv.vsoxe.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>*
%1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i32_nxv4i32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i32.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i32.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i32.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i32_nxv8i32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i32.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16i32.nxv16i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16i32.nxv16i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16i32.nxv16i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16i32_nxv16i32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16i32.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1i64.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1i64_nxv1i64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1i64.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1i64.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1i64_nxv1i64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1i64.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2i64.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2i64_nxv2i64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2i64.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2i64.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, 
* %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2i64_nxv2i64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2i64.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4i64.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4i64_nxv4i64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4i64.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4i64.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4i64_nxv4i64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4i64.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8i64.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8i64_nxv8i64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8i64.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8i64.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8i64_nxv8i64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8i64.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f16.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f16.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f16_nxv1f16_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f16.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f16.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f16.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, 
i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f16_nxv2f16_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f16.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f16.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f16.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f16_nxv4f16_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f16.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f16.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f16.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f16_nxv8f16_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f16.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16f16.nxv16i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16f16.nxv16i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16f16.nxv16i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16f16_nxv16f16_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16f16.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv32f16.nxv32i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv32f16.nxv32i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv32f16.nxv32i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv32f16_nxv32f16_nxv32i8( 
%0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv32f16_nxv32f16_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv32f16.nxv32i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f32.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f32.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f32_nxv1f32_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f32.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f32.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f32.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f32_nxv2f32_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f32.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f32.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f32.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f32_nxv4f32_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f32.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f32.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f32_nxv8f32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f32.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, 
%2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f32_nxv8f32_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f32.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv16f32.nxv16i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv16f32_nxv16f32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv16f32.nxv16i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv16f32.nxv16i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv16f32_nxv16f32_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv16f32.nxv16i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv1f64.nxv1i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv1f64_nxv1f64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv1f64.nxv1i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv1f64.nxv1i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv1f64_nxv1f64_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv1f64.nxv1i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv2f64.nxv2i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv2f64_nxv2f64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv2f64.nxv2i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv2f64.nxv2i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv2f64_nxv2f64_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv2f64.nxv2i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv4f64.nxv4i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv4f64_nxv4f64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv4f64.nxv4i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv4f64.nxv4i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * 
%1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv4f64_nxv4f64_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv4f64.nxv4i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -} - -declare void @llvm.riscv.vsoxe.nxv8f64.nxv8i8( - , - *, - , - i64); - -define void @intrinsic_vsoxe_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_v_nxv8f64_nxv8f64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} - call void @llvm.riscv.vsoxe.nxv8f64.nxv8i8( - %0, - * %1, - %2, - i64 %3) - - ret void -} - -declare void @llvm.riscv.vsoxe.mask.nxv8f64.nxv8i8( - , - *, - , - , - i64); - -define void @intrinsic_vsoxe_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { -entry: -; CHECK-LABEL: intrinsic_vsoxe_mask_v_nxv8f64_nxv8f64_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vsoxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t - call void @llvm.riscv.vsoxe.mask.nxv8f64.nxv8i8( - %0, - * %1, - %2, - %3, - i64 %4) - - ret void -}