diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare @llvm.riscv.vrgather.nxv1i8.nxv1i8( , @@ -1316,585 +1316,767 @@ ret %a } -declare @llvm.riscv.vrgather.nxv1i8.i8( +declare @llvm.riscv.vrgather.nxv1f64.nxv1i64( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vv v25, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f64.nxv1i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f64.nxv2i64( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vv v26, v8, v10 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f64.nxv2i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f64.nxv4i64( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vv v28, v8, v12 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f64.nxv4i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vrgather.mask.nxv4f64.nxv4i64( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f64.nxv8i64( + , + , + i32); + +define @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vv v24, v8, v16 +; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f64.nxv8i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64( + , + , + , + , + i32); + +define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu +; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv1i8.i32( , - i8, + i32, i32); -define @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i8.i8( + %a = call @llvm.riscv.vrgather.nxv1i8.i32( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv1i8.i8( +declare @llvm.riscv.vrgather.mask.nxv1i8.i32( , , - i8, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv1i8.i32( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv2i8.i8( +declare @llvm.riscv.vrgather.nxv2i8.i32( , - i8, + i32, i32); -define @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2i8.i8( + %a = call @llvm.riscv.vrgather.nxv2i8.i32( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv2i8.i8( +declare @llvm.riscv.vrgather.mask.nxv2i8.i32( , , - i8, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i32( %0, %1, i32 %2, %3, 
i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv2i8.i32( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv4i8.i8( +declare @llvm.riscv.vrgather.nxv4i8.i32( , - i8, + i32, i32); -define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i8.i8( + %a = call @llvm.riscv.vrgather.nxv4i8.i32( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv4i8.i8( +declare @llvm.riscv.vrgather.mask.nxv4i8.i32( , , - i8, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv4i8.i32( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv8i8.i8( +declare @llvm.riscv.vrgather.nxv8i8.i32( , - i8, + i32, i32); -define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i8.i8( + %a = call @llvm.riscv.vrgather.nxv8i8.i32( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv8i8.i8( +declare @llvm.riscv.vrgather.mask.nxv8i8.i32( , , - i8, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv8i8.i32( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv16i8.i8( +declare @llvm.riscv.vrgather.nxv16i8.i32( , - i8, + i32, i32); -define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16i8.i8( + %a = call @llvm.riscv.vrgather.nxv16i8.i32( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv16i8.i8( +declare @llvm.riscv.vrgather.mask.nxv16i8.i32( , , - i8, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv16i8.i32( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv32i8.i8( +declare @llvm.riscv.vrgather.nxv32i8.i32( , - i8, + i32, i32); -define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32i8.i8( + %a = call @llvm.riscv.vrgather.nxv32i8.i32( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv32i8.i8( +declare @llvm.riscv.vrgather.mask.nxv32i8.i32( , , - i8, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv32i8.i32( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv64i8.i8( +declare @llvm.riscv.vrgather.nxv64i8.i32( , - i8, + i32, i32); -define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv64i8.i8( + %a = call @llvm.riscv.vrgather.nxv64i8.i32( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv64i8.i8( +declare @llvm.riscv.vrgather.mask.nxv64i8.i32( , , - i8, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i32( %0, %1, i32 
%2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv64i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv64i8.i32( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv1i16.i16( +declare @llvm.riscv.vrgather.nxv1i16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i16.i16( + %a = call @llvm.riscv.vrgather.nxv1i16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv1i16.i16( +declare @llvm.riscv.vrgather.mask.nxv1i16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv1i16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv2i16.i16( +declare @llvm.riscv.vrgather.nxv2i16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2i16.i16( + %a = call @llvm.riscv.vrgather.nxv2i16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv2i16.i16( +declare @llvm.riscv.vrgather.mask.nxv2i16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv2i16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv4i16.i16( +declare @llvm.riscv.vrgather.nxv4i16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16: +define 
@intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i16.i16( + %a = call @llvm.riscv.vrgather.nxv4i16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv4i16.i16( +declare @llvm.riscv.vrgather.mask.nxv4i16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv4i16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv8i16.i16( +declare @llvm.riscv.vrgather.nxv8i16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i16.i16( + %a = call @llvm.riscv.vrgather.nxv8i16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv8i16.i16( +declare @llvm.riscv.vrgather.mask.nxv8i16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv8i16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv16i16.i16( +declare @llvm.riscv.vrgather.nxv16i16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16i16.i16( + %a = call @llvm.riscv.vrgather.nxv16i16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv16i16.i16( +declare @llvm.riscv.vrgather.mask.nxv16i16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 
%4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv16i16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv32i16.i16( +declare @llvm.riscv.vrgather.nxv32i16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32i16.i16( + %a = call @llvm.riscv.vrgather.nxv32i16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv32i16.i16( +declare @llvm.riscv.vrgather.mask.nxv32i16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv32i16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) @@ -2126,270 +2308,270 @@ ret %a } -declare @llvm.riscv.vrgather.nxv1f16.i16( +declare @llvm.riscv.vrgather.nxv1f16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16: +define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1f16.i16( + %a = call @llvm.riscv.vrgather.nxv1f16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv1f16.i16( +declare @llvm.riscv.vrgather.mask.nxv1f16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16: +define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv1f16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv2f16.i16( +declare 
@llvm.riscv.vrgather.nxv2f16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16: +define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2f16.i16( + %a = call @llvm.riscv.vrgather.nxv2f16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv2f16.i16( +declare @llvm.riscv.vrgather.mask.nxv2f16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16: +define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv2f16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv4f16.i16( +declare @llvm.riscv.vrgather.nxv4f16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16: +define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4f16.i16( + %a = call @llvm.riscv.vrgather.nxv4f16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv4f16.i16( +declare @llvm.riscv.vrgather.mask.nxv4f16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16: +define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv4f16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv8f16.i16( +declare @llvm.riscv.vrgather.nxv8f16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16: +define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8f16.i16( + %a = call @llvm.riscv.vrgather.nxv8f16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare 
@llvm.riscv.vrgather.mask.nxv8f16.i16( +declare @llvm.riscv.vrgather.mask.nxv8f16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16: +define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv8f16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv16f16.i16( +declare @llvm.riscv.vrgather.nxv16f16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16: +define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16f16.i16( + %a = call @llvm.riscv.vrgather.nxv16f16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv16f16.i16( +declare @llvm.riscv.vrgather.mask.nxv16f16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16: +define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv16f16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vrgather.nxv32f16.i16( +declare @llvm.riscv.vrgather.nxv32f16.i32( , - i16, + i32, i32); -define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16: +define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32f16.i16( + %a = call @llvm.riscv.vrgather.nxv32f16.i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv32f16.i16( +declare @llvm.riscv.vrgather.mask.nxv32f16.i32( , , - i16, + i32, , i32); -define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16: +define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vrgather.mask.nxv32f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv32f16.i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) @@ -2621,429 +2803,609 @@ ret %a } -define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8: +declare @llvm.riscv.vrgather.nxv1f64.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vx v25, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f64.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv1f64.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f64.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv2f64.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vx v26, v8, a0 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f64.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv2f64.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f64.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv4f64.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vx v28, v8, a0 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv4f64.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv4f64.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv4f64.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrgather.nxv8f64.i32( + , + i32, + i32); + +define @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vrgather.vx v16, v8, a0 +; CHECK-NEXT: 
vmv8r.v v8, v16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv8f64.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrgather.mask.nxv8f64.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu +; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv8f64.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i8.i8( + %a = call @llvm.riscv.vrgather.nxv1i8.i32( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8: +define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv1i8.i32( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8: +define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2i8.i8( + %a = call @llvm.riscv.vrgather.nxv2i8.i32( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8: +define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv2i8.i32( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8: +define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i8.i8( + %a = call @llvm.riscv.vrgather.nxv4i8.i32( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8: +define 
@intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv4i8.i32( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8: +define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i8.i8( + %a = call @llvm.riscv.vrgather.nxv8i8.i32( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8: +define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv8i8.i32( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8: +define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16i8.i8( + %a = call @llvm.riscv.vrgather.nxv16i8.i32( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8: +define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv16i8.i32( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8: +define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32i8.i8( + %a = call @llvm.riscv.vrgather.nxv32i8.i32( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8: +define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i32( %0, %1, %2, i32 %3) nounwind 
{ +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv32i8.i32( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8: +define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv64i8.i8( + %a = call @llvm.riscv.vrgather.nxv64i8.i32( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8: +define @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv64i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv64i8.i32( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16: +define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i16.i16( + %a = call @llvm.riscv.vrgather.nxv1i16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16: +define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv1i16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16: +define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2i16.i16( + %a = call @llvm.riscv.vrgather.nxv2i16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16: +define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv2i16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16: +define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i16.i16( + %a = call @llvm.riscv.vrgather.nxv4i16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16: +define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv4i16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16: +define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i16.i16( + %a = call @llvm.riscv.vrgather.nxv8i16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16: +define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv8i16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16: +define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16i16.i16( + %a = call @llvm.riscv.vrgather.nxv16i16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16: +define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv16i16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16: +define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32i16.i16( + %a = call @llvm.riscv.vrgather.nxv32i16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16: +define @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv32i16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -3215,198 +3577,198 @@ ret %a } -define @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16: +define @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1f16.i16( + %a = call @llvm.riscv.vrgather.nxv1f16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16: +define @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv1f16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16: +define @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2f16.i16( + %a = call @llvm.riscv.vrgather.nxv2f16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16: +define @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i32( %0, %1, %2, i32 
%3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv2f16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16: +define @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4f16.i16( + %a = call @llvm.riscv.vrgather.nxv4f16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16: +define @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv4f16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16: +define @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8f16.i16( + %a = call @llvm.riscv.vrgather.nxv8f16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16: +define @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv8f16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16: +define @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16f16.i16( + %a = call @llvm.riscv.vrgather.nxv16f16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16: +define @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i32( %0, %1, %2, i32 %3) 
nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv16f16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16: +define @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32f16.i16( + %a = call @llvm.riscv.vrgather.nxv32f16.i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16: +define @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv32f16.i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -3577,3 +3939,135 @@ ret %a } + +define @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vrgather.vi v25, v8, 9 +; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv1f64.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv1f64.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vrgather.vi v26, v8, 9 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.nxv2f64.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vrgather.mask.nxv2f64.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vrgather.vi v28, v8, 9 +; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = 
call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i32(
+    <vscale x 4 x double> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32(<vscale x 8 x double> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vrgather.vi v16, v8, 9
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i32(
+    <vscale x 8 x double> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 8 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
+; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -1680,810 +1680,810 @@
   ret %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i64(
   <vscale x 1 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i64(
     <vscale x 1 x i8> %0,
-    i8 %1,
+    i64 %1,
     i64 %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i8,
+  i64,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i64(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 %2,
+    i64 %2,
     <vscale x 1 x i1> %3,
     i64 %4)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i64(
   <vscale x 2 x i8>,
-  i8,
+  i64,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i8(
+  %a
= call @llvm.riscv.vrgather.nxv2i8.i64( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv2i8.i8( +declare @llvm.riscv.vrgather.mask.nxv2i8.i64( , , - i8, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv2i8.i64( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv4i8.i8( +declare @llvm.riscv.vrgather.nxv4i8.i64( , - i8, + i64, i64); -define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i8.i8( + %a = call @llvm.riscv.vrgather.nxv4i8.i64( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv4i8.i8( +declare @llvm.riscv.vrgather.mask.nxv4i8.i64( , , - i8, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv4i8.i64( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv8i8.i8( +declare @llvm.riscv.vrgather.nxv8i8.i64( , - i8, + i64, i64); -define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i8.i8( + %a = call @llvm.riscv.vrgather.nxv8i8.i64( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv8i8.i8( +declare @llvm.riscv.vrgather.mask.nxv8i8.i64( , , - i8, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i8.i8( + %a = call 
@llvm.riscv.vrgather.mask.nxv8i8.i64( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv16i8.i8( +declare @llvm.riscv.vrgather.nxv16i8.i64( , - i8, + i64, i64); -define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16i8.i8( + %a = call @llvm.riscv.vrgather.nxv16i8.i64( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv16i8.i8( +declare @llvm.riscv.vrgather.mask.nxv16i8.i64( , , - i8, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv16i8.i64( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv32i8.i8( +declare @llvm.riscv.vrgather.nxv32i8.i64( , - i8, + i64, i64); -define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32i8.i8( + %a = call @llvm.riscv.vrgather.nxv32i8.i64( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv32i8.i8( +declare @llvm.riscv.vrgather.mask.nxv32i8.i64( , , - i8, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv32i8.i64( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv64i8.i8( +declare @llvm.riscv.vrgather.nxv64i8.i64( , - i8, + i64, i64); -define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv64i8.i8( + 
%a = call @llvm.riscv.vrgather.nxv64i8.i64( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv64i8.i8( +declare @llvm.riscv.vrgather.mask.nxv64i8.i64( , , - i8, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv64i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv64i8.i64( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv1i16.i16( +declare @llvm.riscv.vrgather.nxv1i16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i16.i16( + %a = call @llvm.riscv.vrgather.nxv1i16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv1i16.i16( +declare @llvm.riscv.vrgather.mask.nxv1i16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv1i16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv2i16.i16( +declare @llvm.riscv.vrgather.nxv2i16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2i16.i16( + %a = call @llvm.riscv.vrgather.nxv2i16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv2i16.i16( +declare @llvm.riscv.vrgather.mask.nxv2i16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 
0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv2i16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv4i16.i16( +declare @llvm.riscv.vrgather.nxv4i16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i16.i16( + %a = call @llvm.riscv.vrgather.nxv4i16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv4i16.i16( +declare @llvm.riscv.vrgather.mask.nxv4i16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv4i16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv8i16.i16( +declare @llvm.riscv.vrgather.nxv8i16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i16.i16( + %a = call @llvm.riscv.vrgather.nxv8i16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv8i16.i16( +declare @llvm.riscv.vrgather.mask.nxv8i16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv8i16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv16i16.i16( +declare @llvm.riscv.vrgather.nxv16i16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vx 
v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16i16.i16( + %a = call @llvm.riscv.vrgather.nxv16i16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv16i16.i16( +declare @llvm.riscv.vrgather.mask.nxv16i16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv16i16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv32i16.i16( +declare @llvm.riscv.vrgather.nxv32i16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32i16.i16( + %a = call @llvm.riscv.vrgather.nxv32i16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv32i16.i16( +declare @llvm.riscv.vrgather.mask.nxv32i16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv32i16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv1i32.i32( +declare @llvm.riscv.vrgather.nxv1i32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i32.i32( + %a = call @llvm.riscv.vrgather.nxv1i32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv1i32.i32( +declare @llvm.riscv.vrgather.mask.nxv1i32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv1i32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv2i32.i32( +declare @llvm.riscv.vrgather.nxv2i32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2i32.i32( + %a = call @llvm.riscv.vrgather.nxv2i32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv2i32.i32( +declare @llvm.riscv.vrgather.mask.nxv2i32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv2i32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv4i32.i32( +declare @llvm.riscv.vrgather.nxv4i32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i32.i32( + %a = call @llvm.riscv.vrgather.nxv4i32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv4i32.i32( +declare @llvm.riscv.vrgather.mask.nxv4i32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv4i32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv8i32.i32( +declare @llvm.riscv.vrgather.nxv8i32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64( %0, i64 
%1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i32.i32( + %a = call @llvm.riscv.vrgather.nxv8i32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv8i32.i32( +declare @llvm.riscv.vrgather.mask.nxv8i32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv8i32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv16i32.i32( +declare @llvm.riscv.vrgather.nxv16i32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16i32.i32( + %a = call @llvm.riscv.vrgather.nxv16i32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv16i32.i32( +declare @llvm.riscv.vrgather.mask.nxv16i32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv16i32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) @@ -2670,495 +2670,495 @@ ret %a } -declare @llvm.riscv.vrgather.nxv1f16.i16( +declare @llvm.riscv.vrgather.nxv1f16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16: +define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1f16.i16( + %a = call @llvm.riscv.vrgather.nxv1f16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv1f16.i16( +declare @llvm.riscv.vrgather.mask.nxv1f16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16: +define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv1f16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv2f16.i16( +declare @llvm.riscv.vrgather.nxv2f16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16: +define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2f16.i16( + %a = call @llvm.riscv.vrgather.nxv2f16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv2f16.i16( +declare @llvm.riscv.vrgather.mask.nxv2f16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16: +define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv2f16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv4f16.i16( +declare @llvm.riscv.vrgather.nxv4f16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16: +define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4f16.i16( + %a = call @llvm.riscv.vrgather.nxv4f16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv4f16.i16( +declare @llvm.riscv.vrgather.mask.nxv4f16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16: +define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv4f16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv8f16.i16( +declare @llvm.riscv.vrgather.nxv8f16.i64( , - i16, + i64, i64); -define 
@intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16: +define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8f16.i16( + %a = call @llvm.riscv.vrgather.nxv8f16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv8f16.i16( +declare @llvm.riscv.vrgather.mask.nxv8f16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16: +define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv8f16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv16f16.i16( +declare @llvm.riscv.vrgather.nxv16f16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16: +define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16f16.i16( + %a = call @llvm.riscv.vrgather.nxv16f16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv16f16.i16( +declare @llvm.riscv.vrgather.mask.nxv16f16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16: +define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv16f16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv32f16.i16( +declare @llvm.riscv.vrgather.nxv32f16.i64( , - i16, + i64, i64); -define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16: +define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32f16.i16( + %a = call @llvm.riscv.vrgather.nxv32f16.i64( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare 
@llvm.riscv.vrgather.mask.nxv32f16.i16( +declare @llvm.riscv.vrgather.mask.nxv32f16.i64( , , - i16, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16: +define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32f16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv32f16.i64( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv1f32.i32( +declare @llvm.riscv.vrgather.nxv1f32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32: +define @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1f32.i32( + %a = call @llvm.riscv.vrgather.nxv1f32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv1f32.i32( +declare @llvm.riscv.vrgather.mask.nxv1f32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32: +define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1f32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv1f32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv2f32.i32( +declare @llvm.riscv.vrgather.nxv2f32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32: +define @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2f32.i32( + %a = call @llvm.riscv.vrgather.nxv2f32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv2f32.i32( +declare @llvm.riscv.vrgather.mask.nxv2f32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32: +define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2f32.i32( + %a = call 
@llvm.riscv.vrgather.mask.nxv2f32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv4f32.i32( +declare @llvm.riscv.vrgather.nxv4f32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32: +define @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4f32.i32( + %a = call @llvm.riscv.vrgather.nxv4f32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv4f32.i32( +declare @llvm.riscv.vrgather.mask.nxv4f32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32: +define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4f32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv4f32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv8f32.i32( +declare @llvm.riscv.vrgather.nxv8f32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32: +define @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8f32.i32( + %a = call @llvm.riscv.vrgather.nxv8f32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv8f32.i32( +declare @llvm.riscv.vrgather.mask.nxv8f32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32: +define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8f32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv8f32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vrgather.nxv16f32.i32( +declare @llvm.riscv.vrgather.nxv16f32.i64( , - i32, + i64, i64); -define @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32: +define @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) 
entry: - %a = call @llvm.riscv.vrgather.nxv16f32.i32( + %a = call @llvm.riscv.vrgather.nxv16f32.i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vrgather.mask.nxv16f32.i32( +declare @llvm.riscv.vrgather.mask.nxv16f32.i64( , , - i32, + i64, , i64); -define @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32: +define @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16f32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv16f32.i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) @@ -3345,594 +3345,594 @@ ret %a } -define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8: +define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i8.i8( + %a = call @llvm.riscv.vrgather.nxv1i8.i64( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8: +define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv1i8.i64( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8: +define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv2i8.i8( + %a = call @llvm.riscv.vrgather.nxv2i8.i64( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8: +define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv2i8.i64( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8: +define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; 
CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i8.i8( + %a = call @llvm.riscv.vrgather.nxv4i8.i64( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8: +define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv4i8.i64( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8: +define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i8.i8( + %a = call @llvm.riscv.vrgather.nxv8i8.i64( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8: +define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv8i8.i64( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8: +define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv16i8.i8( + %a = call @llvm.riscv.vrgather.nxv16i8.i64( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8: +define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv16i8.i64( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8: +define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr 
zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32i8.i8( + %a = call @llvm.riscv.vrgather.nxv32i8.i64( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8: +define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv32i8.i64( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8: +define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv64i8.i8( + %a = call @llvm.riscv.vrgather.nxv64i8.i64( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8: +define @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv64i8.i8( + %a = call @llvm.riscv.vrgather.mask.nxv64i8.i64( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16: +define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i16.i16( + %a = call @llvm.riscv.vrgather.nxv1i16.i64( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16: +define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv1i16.i64( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16: +define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vrgather.nxv2i16.i16( + %a = call @llvm.riscv.vrgather.nxv2i16.i64( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16: +define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv2i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv2i16.i64( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16: +define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv4i16.i16( + %a = call @llvm.riscv.vrgather.nxv4i16.i64( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16: +define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv4i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv4i16.i64( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16: +define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv8i16.i16( + %a = call @llvm.riscv.vrgather.nxv8i16.i64( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16: +define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv8i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv8i16.i64( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16: +define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vrgather.nxv16i16.i16( + %a = call @llvm.riscv.vrgather.nxv16i16.i64( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16: +define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv16i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv16i16.i64( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16: +define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv32i16.i16( + %a = call @llvm.riscv.vrgather.nxv32i16.i64( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16: +define @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv32i16.i16( + %a = call @llvm.riscv.vrgather.mask.nxv32i16.i64( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32: +define @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.nxv1i32.i32( + %a = call @llvm.riscv.vrgather.nxv1i32.i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32: +define @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vrgather.mask.nxv1i32.i32( + %a = call @llvm.riscv.vrgather.mask.nxv1i32.i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32: +define @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: 
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i64(
     <vscale x 2 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32:
+define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i64(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i64(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i64(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i64(
     <vscale x 8 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i64(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i64(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32:
+define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i64(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)
@@ -4071,363 +4071,363 @@

   ret %a
 }

-define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16:
+define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64(<vscale x 1 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i64(
     <vscale x 1 x half> %0,
-    i16 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16:
+define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i64(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    i16 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16:
+define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64(<vscale x 2 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i64(
     <vscale x 2 x half> %0,
-    i16 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16:
+define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i64(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    i16 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16:
+define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64(<vscale x 4 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i64(
     <vscale x 4 x half> %0,
-    i16 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16:
+define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i64(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    i16 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16:
+define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64(<vscale x 8 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i64(
     <vscale x 8 x half> %0,
-    i16 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16:
+define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i64(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    i16 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16:
+define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64(<vscale x 16 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i64(
     <vscale x 16 x half> %0,
-    i16 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16:
+define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i64(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    i16 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16:
+define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64(<vscale x 32 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i64(
     <vscale x 32 x half> %0,
-    i16 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16:
+define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i64(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    i16 9,
+    i64 9,
     <vscale x 32 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32:
+define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64(<vscale x 1 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i64(
     <vscale x 1 x float> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32:
+define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i64(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    i32 9,
+    i64 9,
     <vscale x 1 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32:
+define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64(<vscale x 2 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i64(
     <vscale x 2 x float> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32:
+define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i64(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    i32 9,
+    i64 9,
     <vscale x 2 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32:
+define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64(<vscale x 4 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i64(
     <vscale x 4 x float> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32:
+define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i64(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    i32 9,
+    i64 9,
     <vscale x 4 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32:
+define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64(<vscale x 8 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i64(
     <vscale x 8 x float> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32:
+define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i64(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    i32 9,
+    i64 9,
     <vscale x 8 x i1> %2,
     i64 %3)

   ret %a
 }

-define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32:
+define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64(<vscale x 16 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i64(
     <vscale x 16 x float> %0,
-    i32 9,
+    i64 9,
     i64 %1)

   ret %a
 }

-define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32:
+define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i64(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    i32 9,
+    i64 9,
     <vscale x 16 x i1> %2,
     i64 %3)