diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -608,6 +608,8 @@
   defm vfslide1up : RISCVBinaryAAX;
   defm vfslide1down : RISCVBinaryAAX;
 
+  defm vrgather : RISCVBinaryAAX;
+
   defm vaaddu : RISCVSaturatingBinaryAAX;
   defm vaadd : RISCVSaturatingBinaryAAX;
   defm vasubu : RISCVSaturatingBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -178,6 +178,16 @@
   }
 }
 
+// This functor is used to obtain the int vector type that has the same SEW
+// and register multiplier (LMUL) as the input parameter type.
+class GetIntVTypeInfo<VTypeInfo vti>
+{
+  // Equivalent integer vector type. Eg.
+  //   VI8M1 → VI8M1 (identity)
+  //   VF64M4 → VI64M4
+  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
+}
+
 class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
 {
   VTypeInfo Vti = vti;
@@ -671,9 +681,9 @@
   }
 }
 
-multiclass VPseudoBinaryV_VV {
+multiclass VPseudoBinaryV_VV<string Constraint = ""> {
   foreach m = MxList.m in
-    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m>;
+    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
 }
 
 multiclass VPseudoBinaryV_VX<bit IsFloat, string Constraint = ""> {
@@ -682,9 +692,9 @@
                                                     !if(IsFloat, FPR32, GPR), m, Constraint>;
 }
 
-multiclass VPseudoBinaryV_VI<Operand ImmType = simm5> {
+multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
   foreach m = MxList.m in
-    defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m>;
+    defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
 }
 
 // We use earlyclobber here due to
@@ -811,10 +821,10 @@
     defm _VI : VPseudoBinary<m.vrclass, m.vrclass, simm5, m>;
 }
 
-multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
-  defm "" : VPseudoBinaryV_VV;
-  defm "" : VPseudoBinaryV_VX</*IsFloat=*/0>;
-  defm "" : VPseudoBinaryV_VI<ImmType>;
+multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+  defm "" : VPseudoBinaryV_VV<Constraint>;
+  defm "" : VPseudoBinaryV_VX</*IsFloat=*/0, Constraint>;
+  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>;
 }
 
 multiclass VPseudoBinaryV_VV_VX {
@@ -1248,6 +1258,17 @@
                      vti.RegClass, vti.RegClass>;
 }
 
+multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
+                              list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in {
+    defvar ivti = GetIntVTypeInfo<vti>.Vti;
+    defm : VPatBinary<intrinsic, instruction, "VV",
+                      vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, vti.RegClass>;
+  }
+}
+
 multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -1258,6 +1279,15 @@
                      vti.RegClass, vti.ScalarRegClass>;
 }
 
+multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
+                              list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatBinary<intrinsic, instruction, "VX",
+                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, GPR>;
+}
+
 multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand imm_type> {
   foreach vti = vtilist in
@@ -1644,6 +1674,14 @@
   defm "" : VPatBinaryM_VI<intrinsic, instruction, vtilist>;
 }
 
+multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
+                                    list<VTypeInfo> vtilist, Operand ImmType = simm5>
+{
+  defm "" : VPatBinaryV_VV_INT<intrinsic, instruction, vtilist>;
+  defm "" : VPatBinaryV_VX_INT<intrinsic, instruction, vtilist>;
+  defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
+}
+
 multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
   foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
@@ -2079,6 +2117,11 @@
   defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VX</*IsFloat=*/1>;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+//===----------------------------------------------------------------------===//
+// 17.4. Vector Register Gather Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVRGATHER : VPseudoBinaryV_VV_VX_VI<uimm5, "@earlyclobber $rd">;
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -2536,5 +2579,18 @@
   defm "" : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+//===----------------------------------------------------------------------===//
+// 17.4. Vector Register Gather Instructions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtV] in {
+  defm "" : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+                                     AllIntegerVectors, uimm5>;
+} // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+  defm "" : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+                                     AllFloatVectors, uimm5>;
+} // Predicates = [HasStdExtV, HasStdExtF]
+
 // Include the non-intrinsic ISel patterns
 include "RISCVInstrInfoVSDPatterns.td"
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -0,0 +1,3624 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vrgather.vv v25, v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vrgather.vv v25, v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vrgather.vv v25, v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vrgather.vv v25, v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vrgather.vv v26, v16, v18
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vrgather.vv v28, v16, v20
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vle8.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vrgather.vv v8, v16, v24
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vrgather.vv v25, v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vrgather.vv v25, v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vrgather.vv v25, v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, 
v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vrgather.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f16.nxv1i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f16.nxv2i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f16.nxv4i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f16.nxv8i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16f16.nxv16i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32f16.nxv32i16( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32f16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f32.nxv1i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f32.nxv2i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f32.nxv4i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32( + , + , 
+ , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f32.nxv8i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16f32.nxv16i32( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define 
@intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; 
CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr 
zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16f16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vrgather.mask.nxv16f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32f16.i16( + , + i16, + i32); + +define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32f16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vrgather.nxv4f32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16f32.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv64i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv64i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + 
%a = call @llvm.riscv.vrgather.nxv4i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32f16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vrgather.nxv4f32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -0,0 +1,4630 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vrgather.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vrgather.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8( + , + , + , + , + 
i64); + +define @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i16.nxv16i16( + %0, + %1, + 
i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i64.nxv1i64( + , + , 
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vrgather.vv v25, v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vrgather.vv v26, v16, v18
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vrgather.vv v28, v16, v20
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vrgather.vv v8, v16, v24
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
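+; NOTE: Review annotation, not autogenerated. Two patterns recur in the checks
+; in this file. (1) vrgather may not have its destination overlap the source or
+; index operands, so the unmasked tests compute into a fresh register group
+; (v25/v26/v28/v8) and copy the result back to v16 with a whole-register move
+; (vmv1r.v/vmv2r.v/vmv4r.v/vmv8r.v). (2) At LMUL=8 a second full-width vector
+; operand no longer fits in the vector argument registers, so it is passed
+; indirectly and reloaded at VLMAX ("vsetvli aN, zero, ...") with a full
+; unit-stride load (vle64.v here) before vsetvli switches back to the requested
+; VL for the gather itself.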
+declare @llvm.riscv.vrgather.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f16.nxv1i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f16.nxv2i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f16.nxv4i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16( + %0, + %1, + 
%2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f16.nxv8i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16f16.nxv16i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32f16.nxv32i16( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32f16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f32.nxv1i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f32.nxv2i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f32.nxv4i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f32.nxv8i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v 
v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16f32.nxv16i32( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f64.nxv1i64( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v16, v17 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f64.nxv2i64( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v16, v18 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f64.nxv4i64( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, 
i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v16, v20 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f64.nxv8i64( + , + , + i64); + +define @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vrgather.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu 
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vrgather.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i32.i32( + %0, + %1, 
+ i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vrgather.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f16.i16( + , + 
i16, + i64); + +define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16f16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16f16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; 
CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv32f16.i16( + , + i16, + i64); + +define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32f16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv32f16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32f16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv16f32.i32( + , + i32, + i64); + +define @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16f32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv16f32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16f32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1f64.i64( + , + i64, + i64); + +define @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v16, a0 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f64.i64( + , + i64, + i64); + +define @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v16, a0 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f64.i64( + , + i64, + i64); + +define @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v16, a0 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f64.i64( + , + i64, + i64); + +define @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vi v25, 
v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv64i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv64i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define 
@intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv32i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv32i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, 
e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv16i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, 
i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu +; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv16i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v16, 9 +; CHECK-NEXT: vmv1r.v v16, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v16, 9 +; CHECK-NEXT: vmv2r.v v16, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v16, 9 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vi v8, v16, 9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: 
+; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vrgather.vi v25, v16, 9
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i16(
+    <vscale x 1 x half> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vrgather.vi v25, v16, 9
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i16(
+    <vscale x 2 x half> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vrgather.vi v25, v16, 9
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i16(
+    <vscale x 4 x half> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vrgather.vi v26, v16, 9
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i16(
+    <vscale x 8 x half> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vrgather.vi v28, v16, 9
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i16(
+    <vscale x 16 x half> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vrgather.vi v8, v16, 9
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i16(
+    <vscale x 32 x half> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 32 x half> %a
+}
+
+define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half> %1,
+    i16 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x half> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vrgather.vi v25, v16, 9
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+    <vscale x 1 x float> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vrgather.vi v25, v16, 9
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+    <vscale x 2 x float> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vrgather.vi v26, v16, 9
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+    <vscale x 4 x float> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vrgather.vi v28, v16, 9
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+    <vscale x 8 x float> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vrgather.vi v8, v16, 9
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+    <vscale x 16 x float> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vrgather.vi v25, v16, 9
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i64(
+    <vscale x 1 x double> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vrgather.vi v26, v16, 9
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i64(
+    <vscale x 2 x double> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vrgather.vi v28, v16, 9
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vrgather.vi v8, v16, 9
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i64(
+    <vscale x 8 x double> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 8 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x double> %a
+}