diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll @@ -496,858 +496,858 @@ ret %a } -declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnclip.nxv1i8.nxv1i16( , - i8, + i32, i32); -define @intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclip_vx_nxv1i8_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16( , , - i8, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnclip.nxv2i8.nxv2i16( , - i8, + i32, i32); -define @intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclip_vx_nxv2i8_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16( , , - i8, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnclip.nxv4i8.nxv4i16( , - i8, + i32, i32); -define @intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclip_vx_nxv4i8_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v 
v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16( , , - i8, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnclip.nxv8i8.nxv8i16( , - i8, + i32, i32); -define @intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclip_vx_nxv8i8_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16( , , - i8, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnclip.nxv16i8.nxv16i16( , - i8, + i32, i32); -define @intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclip_vx_nxv16i8_nxv16i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16( , , - i8, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vnclip.wx v8, 
v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnclip.nxv32i8.nxv32i16( , - i8, + i32, i32); -define @intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclip_vx_nxv32i8_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16( , , - i8, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnclip.nxv1i16.nxv1i32( , - i16, + i32, i32); -define @intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclip_vx_nxv1i16_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32( , , - i16, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnclip.nxv2i16.nxv2i32( , - i16, + i32, i32); -define @intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclip_vx_nxv2i16_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; 
CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32( , , - i16, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnclip.nxv4i16.nxv4i32( , - i16, + i32, i32); -define @intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclip_vx_nxv4i16_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32( , , - i16, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnclip.nxv8i16.nxv8i32( , - i16, + i32, i32); -define @intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclip_vx_nxv8i16_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32( , , - i16, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnclip.nxv16i16.nxv16i32( , - i16, + i32, i32); -define @intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclip_vx_nxv16i16_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32( , , - i16, + i32, , i32); -define @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -define @intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define 
@intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll @@ -676,1170 +676,1170 @@ ret %a } -declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnclip.nxv1i8.nxv1i16( , - i8, + i64, i64); -define @intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclip_vx_nxv1i8_nxv1i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16( , , - i8, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnclip.nxv2i8.nxv2i16( , - i8, + i64, i64); -define @intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclip_vx_nxv2i8_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16( , , - i8, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a 
} -declare @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnclip.nxv4i8.nxv4i16( , - i8, + i64, i64); -define @intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclip_vx_nxv4i8_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16( , , - i8, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnclip.nxv8i8.nxv8i16( , - i8, + i64, i64); -define @intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclip_vx_nxv8i8_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16( , , - i8, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnclip.nxv16i8.nxv16i16( , - i8, + i64, i64); -define @intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclip_vx_nxv16i8_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare 
@llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16( , , - i8, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnclip.nxv32i8.nxv32i16( , - i8, + i64, i64); -define @intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclip_vx_nxv32i8_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16( , , - i8, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnclip.nxv1i16.nxv1i32( , - i16, + i64, i64); -define @intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclip_vx_nxv1i16_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32( , , - i16, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( + %a = call 
@llvm.riscv.vnclip.mask.nxv1i16.nxv1i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnclip.nxv2i16.nxv2i32( , - i16, + i64, i64); -define @intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclip_vx_nxv2i16_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32( , , - i16, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnclip.nxv4i16.nxv4i32( , - i16, + i64, i64); -define @intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclip_vx_nxv4i16_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32( , , - i16, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnclip.nxv8i16.nxv8i32( , - i16, + i64, i64); -define @intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclip_vx_nxv8i16_nxv8i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vnclip.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32( , , - i16, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnclip.nxv16i16.nxv16i32( , - i16, + i64, i64); -define @intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclip_vx_nxv16i16_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32( , , - i16, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32( +declare @llvm.riscv.vnclip.nxv1i32.nxv1i64( , - i32, + i64, i64); -define @intrinsic_vnclip_wx_nxv1i32_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i32_nxv1i64_i32: +define @intrinsic_vnclip_vx_nxv1i32_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32( +declare @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64( , , - i32, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32: +define @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; 
CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32( +declare @llvm.riscv.vnclip.nxv2i32.nxv2i64( , - i32, + i64, i64); -define @intrinsic_vnclip_wx_nxv2i32_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i32_nxv2i64_i32: +define @intrinsic_vnclip_vx_nxv2i32_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32( +declare @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64( , , - i32, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32: +define @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32( +declare @llvm.riscv.vnclip.nxv4i32.nxv4i64( , - i32, + i64, i64); -define @intrinsic_vnclip_wx_nxv4i32_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i32_nxv4i64_i32: +define @intrinsic_vnclip_vx_nxv4i32_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32( +declare @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64( , , - i32, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32: +define @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32( +declare @llvm.riscv.vnclip.nxv8i32.nxv8i64( , - i32, + i64, i64); -define @intrinsic_vnclip_wx_nxv8i32_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i32_nxv8i64_i32: +define @intrinsic_vnclip_vx_nxv8i32_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32( +declare @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64( , , - i32, + i64, , i64); -define @intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32: +define @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -define @intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: 
intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, 
e16,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i32_nxv1i64_i32: +define @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, 
e32,mf2,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32: +define @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i32_nxv2i64_i32: +define @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32: +define @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i32_nxv4i64_i32: +define @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32: +define @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclip_wi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i32_nxv8i64_i32: +define @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu ; CHECK-NEXT: 
vnclip.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32: +define @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll @@ -496,858 +496,858 @@ ret %a } -declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16( , - i8, + i32, i32); -define @intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclipu_vx_nxv1i8_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16( , , - i8, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16( , - i8, + i32, i32); -define @intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclipu_vx_nxv2i8_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16( , , - i8, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; 
CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16( , - i8, + i32, i32); -define @intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclipu_vx_nxv4i8_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16( , , - i8, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16( , - i8, + i32, i32); -define @intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclipu_vx_nxv8i8_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16( , , - i8, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16( , - i8, + i32, i32); -define @intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclipu_vx_nxv16i8_nxv16i16( %0, i32 %1, i32 %2) nounwind { 
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16( , , - i8, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16( , - i8, + i32, i32); -define @intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclipu_vx_nxv32i8_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16( , , - i8, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32( , - i16, + i32, i32); -define @intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclipu_vx_nxv1i16_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32( , , - i16, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32( , - i16, + i32, i32); -define @intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclipu_vx_nxv2i16_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32( , , - i16, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32( , - i16, + i32, i32); -define @intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclipu_vx_nxv4i16_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32( , , - i16, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32( , - i16, + i32, i32); 
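; Editor's note (not part of the patch): this rendering has collapsed the diff's line
; breaks and dropped the angle-bracketed vector types, so the hunks are best read
; against the mangled intrinsic names. As a hedged sketch, reconstructing the types
; from the nxv*i* mangling, the declaration that closes immediately above loses its
; ".i16" name suffix and its element-sized shift operand becomes the GPR-sized i32
; used elsewhere in this rv32 file (the rv64 files later in the diff use i64):
;
;   before:  declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
;              <vscale x 8 x i32>, i16, i32)
;
declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
  <vscale x 8 x i32>,
  i32,
  i32);
; The test functions are renamed to match (intrinsic_vnclipu_wx_* becomes
; intrinsic_vnclipu_vx_*, intrinsic_vnclipu_wi_* becomes intrinsic_vnclipu_vi_*),
; while the vnclipu.wx / vnclipu.wi assembly in the CHECK lines is unchanged.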
-define @intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclipu_vx_nxv8i16_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32( , , - i16, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32( , - i16, + i32, i32); -define @intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclipu_vx_nxv16i16_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32( , , - i16, + i32, , i32); -define @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -define @intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8: +define 
@intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) 
nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) 
nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, 
%2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll @@ -676,1170 +676,1170 @@ ret %a } -declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16( , - i8, + i64, i64); -define @intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclipu_vx_nxv1i8_nxv1i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16( , , - i8, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16( , - i8, + i64, i64); -define @intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8: +define 
@intrinsic_vnclipu_vx_nxv2i8_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16( , , - i8, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16( , - i8, + i64, i64); -define @intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclipu_vx_nxv4i8_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16( , , - i8, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16( , - i8, + i64, i64); -define @intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclipu_vx_nxv8i8_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16( , , - i8, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16( , - i8, + i64, i64); -define @intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclipu_vx_nxv16i8_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16( , , - i8, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16( , - i8, + i64, i64); -define @intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclipu_vx_nxv32i8_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16( , , - i8, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32( , - i16, + i64, i64); -define 
@intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclipu_vx_nxv1i16_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32( , , - i16, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32( , - i16, + i64, i64); -define @intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclipu_vx_nxv2i16_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32( , , - i16, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32( , - i16, + i64, i64); -define @intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclipu_vx_nxv4i16_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare 
@llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32( , , - i16, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32( , - i16, + i64, i64); -define @intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclipu_vx_nxv8i16_nxv8i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32( , , - i16, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32( , - i16, + i64, i64); -define @intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclipu_vx_nxv16i16_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32( , , - i16, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32( +declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64( , - i32, + i64, i64); -define @intrinsic_vnclipu_wx_nxv1i32_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i32_nxv1i64_i32: +define @intrinsic_vnclipu_vx_nxv1i32_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32( +declare @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64( , , - i32, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32: +define @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32( +declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64( , - i32, + i64, i64); -define @intrinsic_vnclipu_wx_nxv2i32_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i32_nxv2i64_i32: +define @intrinsic_vnclipu_vx_nxv2i32_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32( +declare @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64( , , - i32, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32: +define @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32( +declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64( , - i32, + i64, i64); -define @intrinsic_vnclipu_wx_nxv4i32_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i32_nxv4i64_i32: +define @intrinsic_vnclipu_vx_nxv4i32_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: 
vnclipu.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32( +declare @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64( , , - i32, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32: +define @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32( +declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64( , - i32, + i64, i64); -define @intrinsic_vnclipu_wx_nxv8i32_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i32_nxv8i64_i32: +define @intrinsic_vnclipu_vx_nxv8i32_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32( +declare @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64( , , - i32, + i64, , i64); -define @intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32: +define @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -define @intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8( + %a = call 
@llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16( %0, %1, - i8 9, + i64 9, 
%2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32( %0, %1, - i16 9, + 
i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32( %0, %1, 
- i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i32_nxv1i64_i32: +define @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32: +define @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i32_nxv2i64_i32: +define @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32: +define @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32( + %a = call 
@llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i32_nxv4i64_i32: +define @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32: +define @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnclipu_wi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i32_nxv8i64_i32: +define @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32: +define @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll @@ -496,1352 +496,860 @@ ret %a } -declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( - , - , - i32); - -define @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu -; CHECK-NEXT: vnsra.wv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - i32); - -define @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu -; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t -; CHECK-NEXT: jalr zero, 
0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( - , - , - i32); - -define @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu -; CHECK-NEXT: vnsra.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - i32); - -define @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu -; CHECK-NEXT: vnsra.wv v8, v10, v9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( - , - , - i32); - -define @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu -; CHECK-NEXT: vnsra.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - i32); - -define @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu -; CHECK-NEXT: vnsra.wv v8, v12, v10, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( - , - , - i32); - -define @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu -; CHECK-NEXT: vnsra.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - i32); - -define @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu -; CHECK-NEXT: vnsra.wv v8, v16, v12, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnsra.nxv1i8.nxv1i16( , - i8, + i32, i32); -define @intrinsic_vnsra_wx_nxv1i8_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vnsra.wx v25, 
v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16( , , - i8, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnsra.nxv2i8.nxv2i16( , - i8, + i32, i32); -define @intrinsic_vnsra_wx_nxv2i8_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16( , , - i8, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnsra.nxv4i8.nxv4i16( , - i8, + i32, i32); -define @intrinsic_vnsra_wx_nxv4i8_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16( , , - i8, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) 
entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnsra.nxv8i8.nxv8i16( , - i8, + i32, i32); -define @intrinsic_vnsra_wx_nxv8i8_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16( , , - i8, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnsra.nxv16i8.nxv16i16( , - i8, + i32, i32); -define @intrinsic_vnsra_wx_nxv16i8_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16( , , - i8, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnsra.nxv32i8.nxv32i16( , - i8, + i32, i32); -define @intrinsic_vnsra_wx_nxv32i8_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vnsra.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16( , , - i8, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnsra.nxv1i16.nxv1i32( , - i16, + i32, i32); -define @intrinsic_vnsra_wx_nxv1i16_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32( , , - i16, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnsra.nxv2i16.nxv2i32( , - i16, + i32, i32); -define @intrinsic_vnsra_wx_nxv2i16_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32( , , - i16, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: 
- %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnsra.nxv4i16.nxv4i32( , - i16, + i32, i32); -define @intrinsic_vnsra_wx_nxv4i16_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32( , , - i16, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnsra.nxv8i16.nxv8i32( , - i16, + i32, i32); -define @intrinsic_vnsra_wx_nxv8i16_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32( , , - i16, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnsra.nxv16i16.nxv16i32( , - i16, + i32, i32); -define @intrinsic_vnsra_wx_nxv16i16_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 
0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32( , , - i16, + i32, , i32); -define @intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32( - , - i32, - i32); - -define @intrinsic_vnsra_wx_nxv1i32_nxv1i64_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i32_nxv1i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vnsra.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32( - , - , - i32, - , - i32); - -define @intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32( - , - i32, - i32); - -define @intrinsic_vnsra_wx_nxv2i32_nxv2i64_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i32_nxv2i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vnsra.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32( - , - , - i32, - , - i32); - -define @intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32( - , - i32, - i32); - -define @intrinsic_vnsra_wx_nxv4i32_nxv4i64_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i32_nxv4i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vnsra.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32( - , - , - i32, - , - i32); - -define @intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32( - , - i32, - i32); - -define @intrinsic_vnsra_wx_nxv8i32_nxv8i64_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i32_nxv8i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vnsra.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32( - , - , - i32, - , - i32); - -define @intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -define @intrinsic_vnsra_wi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a 
} -define @intrinsic_vnsra_wi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vnsra_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i16_nxv4i32_i16: +define 
@intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsra_wi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } - -define @intrinsic_vnsra_wi_nxv1i32_nxv1i64_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i32_nxv1i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, 
a0, e32,mf2,ta,mu -; CHECK-NEXT: vnsra.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu -; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vnsra_wi_nxv2i32_nxv2i64_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i32_nxv2i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu -; CHECK-NEXT: vnsra.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu -; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vnsra_wi_nxv4i32_nxv4i64_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i32_nxv4i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu -; CHECK-NEXT: vnsra.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu -; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vnsra_wi_nxv8i32_nxv8i64_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i32_nxv8i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu -; CHECK-NEXT: vnsra.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu -; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll @@ -676,1170 +676,1170 @@ ret %a } -declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnsra.nxv1i8.nxv1i16( , - i8, + i64, i64); -define @intrinsic_vnsra_wx_nxv1i8_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( 
%0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16( , , - i8, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnsra.nxv2i8.nxv2i16( , - i8, + i64, i64); -define @intrinsic_vnsra_wx_nxv2i8_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16( , , - i8, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnsra.nxv4i8.nxv4i16( , - i8, + i64, i64); -define @intrinsic_vnsra_wx_nxv4i8_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16( , , - i8, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnsra.nxv8i8.nxv8i16( , - i8, + i64, i64); -define @intrinsic_vnsra_wx_nxv8i8_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16( , , - i8, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnsra.nxv16i8.nxv16i16( , - i8, + i64, i64); -define @intrinsic_vnsra_wx_nxv16i8_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16( , , - i8, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnsra.nxv32i8.nxv32i16( , - i8, + i64, i64); -define @intrinsic_vnsra_wx_nxv32i8_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16( , , - i8, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnsra.nxv1i16.nxv1i32( , - i16, + i64, i64); -define @intrinsic_vnsra_wx_nxv1i16_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32( , , - i16, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnsra.nxv2i16.nxv2i32( , - i16, + i64, i64); -define @intrinsic_vnsra_wx_nxv2i16_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32( , , - i16, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnsra.nxv4i16.nxv4i32( , - i16, + i64, i64); -define @intrinsic_vnsra_wx_nxv4i16_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32( , , - i16, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnsra.nxv8i16.nxv8i32( , - i16, + i64, i64); -define @intrinsic_vnsra_wx_nxv8i16_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32( , , - i16, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnsra.nxv16i16.nxv16i32( , - i16, + i64, i64); -define @intrinsic_vnsra_wx_nxv16i16_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vnsra_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32( , , - i16, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32( +declare @llvm.riscv.vnsra.nxv1i32.nxv1i64( , - i32, + i64, i64); -define @intrinsic_vnsra_wx_nxv1i32_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv1i32_nxv1i64_i32: +define @intrinsic_vnsra_vx_nxv1i32_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32( +declare @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64( , , - i32, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32: +define @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32( +declare @llvm.riscv.vnsra.nxv2i32.nxv2i64( , - i32, + i64, i64); -define @intrinsic_vnsra_wx_nxv2i32_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv2i32_nxv2i64_i32: +define @intrinsic_vnsra_vx_nxv2i32_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32( +declare @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64( , , - i32, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32: +define @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64( %0, 
%1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32( +declare @llvm.riscv.vnsra.nxv4i32.nxv4i64( , - i32, + i64, i64); -define @intrinsic_vnsra_wx_nxv4i32_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv4i32_nxv4i64_i32: +define @intrinsic_vnsra_vx_nxv4i32_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32( +declare @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64( , , - i32, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32: +define @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32( +declare @llvm.riscv.vnsra.nxv8i32.nxv8i64( , - i32, + i64, i64); -define @intrinsic_vnsra_wx_nxv8i32_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wx_nxv8i32_nxv8i64_i32: +define @intrinsic_vnsra_vx_nxv8i32_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32( +declare @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64( , , - i32, + i64, , i64); -define @intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32: +define @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -define @intrinsic_vnsra_wi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, 
e8,mf8,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vnsra.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( %0, - 
i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define 
@intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv1i32_nxv1i64_i32: +define @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32: +define @intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv2i32_nxv2i64_i32: +define @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32( %0, 
%1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32: +define @intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv4i32_nxv4i64_i32: +define @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32: +define @intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsra_wi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsra_wi_nxv8i32_nxv8i64_i32: +define @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32: +define @intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll @@ -496,1352 +496,860 @@ ret %a } -declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32( - , - , - i32); - -define @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu -; CHECK-NEXT: vnsrl.wv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32( - %0, - %1, - i32 %2) - - ret %a 
-} - -declare @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - i32); - -define @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu -; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( - , - , - i32); - -define @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu -; CHECK-NEXT: vnsrl.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - i32); - -define @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu -; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( - , - , - i32); - -define @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu -; CHECK-NEXT: vnsrl.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - i32); - -define @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu -; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( - , - , - i32); - -define @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu -; CHECK-NEXT: vnsrl.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - i32); - -define @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu -; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8( +declare 
@llvm.riscv.vnsrl.nxv1i8.nxv1i16( , - i8, + i32, i32); -define @intrinsic_vnsrl_wx_nxv1i8_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsrl_vx_nxv1i8_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16( , , - i8, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16( , - i8, + i32, i32); -define @intrinsic_vnsrl_wx_nxv2i8_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsrl_vx_nxv2i8_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16( , , - i8, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16( , - i8, + i32, i32); -define @intrinsic_vnsrl_wx_nxv4i8_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsrl_vx_nxv4i8_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16( , , - i8, + i32, , i32); -define 
@intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16( , - i8, + i32, i32); -define @intrinsic_vnsrl_wx_nxv8i8_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsrl_vx_nxv8i8_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16( , , - i8, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16( , - i8, + i32, i32); -define @intrinsic_vnsrl_wx_nxv16i8_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsrl_vx_nxv16i8_nxv16i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16( , , - i8, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16( , - i8, + i32, i32); -define 
@intrinsic_vnsrl_wx_nxv32i8_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsrl_vx_nxv32i8_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16( , , - i8, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32( , - i16, + i32, i32); -define @intrinsic_vnsrl_wx_nxv1i16_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsrl_vx_nxv1i16_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32( , , - i16, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32( , - i16, + i32, i32); -define @intrinsic_vnsrl_wx_nxv2i16_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsrl_vx_nxv2i16_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32( , , - i16, + i32, , i32); -define 
@intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32( , - i16, + i32, i32); -define @intrinsic_vnsrl_wx_nxv4i16_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsrl_vx_nxv4i16_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32( , , - i16, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32( , - i16, + i32, i32); -define @intrinsic_vnsrl_wx_nxv8i16_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsrl_vx_nxv8i16_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32( , , - i16, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32( , - i16, + 
i32, i32); -define @intrinsic_vnsrl_wx_nxv16i16_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsrl_vx_nxv16i16_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32( , , - i16, + i32, , i32); -define @intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32( - , - i32, - i32); - -define @intrinsic_vnsrl_wx_nxv1i32_nxv1i64_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i32_nxv1i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vnsrl.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32( - , - , - i32, - , - i32); - -define @intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32( - , - i32, - i32); - -define @intrinsic_vnsrl_wx_nxv2i32_nxv2i64_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i32_nxv2i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vnsrl.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32( - , - , - i32, - , - i32); - -define @intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32( - , - i32, - i32); - -define @intrinsic_vnsrl_wx_nxv4i32_nxv4i64_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i32_nxv4i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vnsrl.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32( - , - , - i32, - , - i32); - -define @intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32( - , - i32, - i32); - -define @intrinsic_vnsrl_wx_nxv8i32_nxv8i64_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i32_nxv8i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vnsrl.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32( - , - , - i32, - , - i32); - -define @intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -define @intrinsic_vnsrl_wi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8: +define 
@intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( %0, - i8 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16( %0, %1, - i8 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( %0, - i16 9, + i32 9, i32 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: 
vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32( %0, %1, - i16 9, + i32 9, %2, i32 %3) ret %a } - -define @intrinsic_vnsrl_wi_nxv1i32_nxv1i64_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i32_nxv1i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu -; CHECK-NEXT: vnsrl.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu -; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vnsrl_wi_nxv2i32_nxv2i64_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i32_nxv2i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu -; CHECK-NEXT: vnsrl.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu -; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vnsrl_wi_nxv4i32_nxv4i64_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i32_nxv4i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu -; CHECK-NEXT: vnsrl.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu -; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vnsrl_wi_nxv8i32_nxv8i64_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i32_nxv8i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu -; CHECK-NEXT: vnsrl.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu -; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll @@ -676,1170 +676,1170 @@ ret %a } -declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16( , - i8, + i64, i64); -define @intrinsic_vnsrl_wx_nxv1i8_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsrl_vx_nxv1i8_nxv1i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16( , , - i8, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16( , - i8, + i64, i64); -define @intrinsic_vnsrl_wx_nxv2i8_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsrl_vx_nxv2i8_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16( , , - i8, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16( , - i8, + i64, i64); -define @intrinsic_vnsrl_wx_nxv4i8_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsrl_vx_nxv4i8_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16( %0, - 
i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16( , , - i8, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16( , - i8, + i64, i64); -define @intrinsic_vnsrl_wx_nxv8i8_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsrl_vx_nxv8i8_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16( , , - i8, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16( , - i8, + i64, i64); -define @intrinsic_vnsrl_wx_nxv16i8_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsrl_vx_nxv16i8_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16( , , - i8, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16( %0, %1, - i8 %2, + i64 %2, %3, i64 
%4) ret %a } -declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16( , - i8, + i64, i64); -define @intrinsic_vnsrl_wx_nxv32i8_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsrl_vx_nxv32i8_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( +declare @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16( , , - i8, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32( , - i16, + i64, i64); -define @intrinsic_vnsrl_wx_nxv1i16_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsrl_vx_nxv1i16_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32( , , - i16, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32( , - i16, + i64, i64); -define @intrinsic_vnsrl_wx_nxv2i16_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsrl_vx_nxv2i16_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } 
-declare @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32( , , - i16, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32( , - i16, + i64, i64); -define @intrinsic_vnsrl_wx_nxv4i16_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsrl_vx_nxv4i16_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32( , , - i16, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32( , - i16, + i64, i64); -define @intrinsic_vnsrl_wx_nxv8i16_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsrl_vx_nxv8i16_nxv8i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32( , , - i16, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32( %0, %1, - i16 %2, + 
i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32( , - i16, + i64, i64); -define @intrinsic_vnsrl_wx_nxv16i16_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsrl_vx_nxv16i16_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( +declare @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32( , , - i16, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32( +declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64( , - i32, + i64, i64); -define @intrinsic_vnsrl_wx_nxv1i32_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv1i32_nxv1i64_i32: +define @intrinsic_vnsrl_vx_nxv1i32_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32( +declare @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64( , , - i32, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32: +define @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32( +declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64( , - i32, + i64, i64); -define @intrinsic_vnsrl_wx_nxv2i32_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv2i32_nxv2i64_i32: +define @intrinsic_vnsrl_vx_nxv2i32_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32( + %a = call 
@llvm.riscv.vnsrl.nxv2i32.nxv2i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32( +declare @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64( , , - i32, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32: +define @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32( +declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64( , - i32, + i64, i64); -define @intrinsic_vnsrl_wx_nxv4i32_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv4i32_nxv4i64_i32: +define @intrinsic_vnsrl_vx_nxv4i32_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32( +declare @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64( , , - i32, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32: +define @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32( +declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64( , - i32, + i64, i64); -define @intrinsic_vnsrl_wx_nxv8i32_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wx_nxv8i32_nxv8i64_i32: +define @intrinsic_vnsrl_vx_nxv8i32_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32( +declare @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64( , , - i32, + i64, , i64); -define @intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32: +define @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -define @intrinsic_vnsrl_wi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } 
-define @intrinsic_vnsrl_wi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( %0, - i8 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8: +define @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8( + %a = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16( %0, %1, - i8 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vnsrl_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i16_nxv8i32_i16: +define 
@intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( %0, - i16 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16: +define @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16( + %a = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32( %0, %1, - i16 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv1i32_nxv1i64_i32: +define @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32: +define @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32( + %a = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv2i32_nxv2i64_i32: +define @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32( %0, i64 
%1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32: +define @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32( + %a = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv4i32_nxv4i64_i32: +define @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 ; CHECK-NEXT: vmv2r.v v8, v26 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32: +define @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32( + %a = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } -define @intrinsic_vnsrl_wi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_wi_nxv8i32_nxv8i64_i32: +define @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 ; CHECK-NEXT: vmv4r.v v8, v28 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( %0, - i32 9, + i64 9, i64 %1) ret %a } -define @intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32: +define @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32( + %a = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64( %0, %1, - i32 9, + i64 9, %2, i64 %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -796,591 +796,591 @@ ret %a } -declare @llvm.riscv.vsll.nxv1i8.i8( 
+declare @llvm.riscv.vsll.nxv1i8( , - i8, + i32, i32); -define @intrinsic_vsll_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i8.i8( + %a = call @llvm.riscv.vsll.nxv1i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.i8( +declare @llvm.riscv.vsll.mask.nxv1i8( , , - i8, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv1i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv2i8.i8( +declare @llvm.riscv.vsll.nxv2i8( , - i8, + i32, i32); -define @intrinsic_vsll_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i8.i8( + %a = call @llvm.riscv.vsll.nxv2i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv2i8.i8( +declare @llvm.riscv.vsll.mask.nxv2i8( , , - i8, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv2i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv4i8.i8( +declare @llvm.riscv.vsll.nxv4i8( , - i8, + i32, i32); -define @intrinsic_vsll_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i8.i8( + %a = call @llvm.riscv.vsll.nxv4i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv4i8.i8( +declare @llvm.riscv.vsll.mask.nxv4i8( , , - i8, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv4i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv8i8.i8( +declare @llvm.riscv.vsll.nxv8i8( , - i8, + i32, i32); -define @intrinsic_vsll_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i8.i8( + %a = call @llvm.riscv.vsll.nxv8i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv8i8.i8( +declare @llvm.riscv.vsll.mask.nxv8i8( , , - i8, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv8i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv16i8.i8( +declare @llvm.riscv.vsll.nxv16i8( , - i8, + i32, i32); -define @intrinsic_vsll_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsll_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i8.i8( + %a = call @llvm.riscv.vsll.nxv16i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv16i8.i8( +declare @llvm.riscv.vsll.mask.nxv16i8( , , - i8, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv16i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv32i8.i8( +declare @llvm.riscv.vsll.nxv32i8( , - i8, + i32, i32); -define @intrinsic_vsll_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsll_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv32i8.i8( + %a = call @llvm.riscv.vsll.nxv32i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv32i8.i8( +declare @llvm.riscv.vsll.mask.nxv32i8( , , - i8, + i32, , i32); -define 
@intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv32i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv64i8.i8( +declare @llvm.riscv.vsll.nxv64i8( , - i8, + i32, i32); -define @intrinsic_vsll_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsll_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv64i8.i8( + %a = call @llvm.riscv.vsll.nxv64i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv64i8.i8( +declare @llvm.riscv.vsll.mask.nxv64i8( , , - i8, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv64i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv1i16.i16( +declare @llvm.riscv.vsll.nxv1i16( , - i16, + i32, i32); -define @intrinsic_vsll_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsll_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i16.i16( + %a = call @llvm.riscv.vsll.nxv1i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv1i16.i16( +declare @llvm.riscv.vsll.mask.nxv1i16( , , - i16, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv1i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv2i16.i16( +declare @llvm.riscv.vsll.nxv2i16( , - i16, + i32, i32); -define @intrinsic_vsll_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsll_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i16.i16( + %a = call @llvm.riscv.vsll.nxv2i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv2i16.i16( +declare @llvm.riscv.vsll.mask.nxv2i16( , , - i16, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv2i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv4i16.i16( +declare @llvm.riscv.vsll.nxv4i16( , - i16, + i32, i32); -define @intrinsic_vsll_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsll_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i16.i16( + %a = call @llvm.riscv.vsll.nxv4i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv4i16.i16( +declare @llvm.riscv.vsll.mask.nxv4i16( , , - i16, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv4i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv8i16.i16( +declare @llvm.riscv.vsll.nxv8i16( , - i16, + i32, i32); -define @intrinsic_vsll_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsll_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i16.i16( + %a = call @llvm.riscv.vsll.nxv8i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv8i16.i16( +declare @llvm.riscv.vsll.mask.nxv8i16( , , - i16, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv8i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a 
} -declare @llvm.riscv.vsll.nxv16i16.i16( +declare @llvm.riscv.vsll.nxv16i16( , - i16, + i32, i32); -define @intrinsic_vsll_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsll_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i16.i16( + %a = call @llvm.riscv.vsll.nxv16i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv16i16.i16( +declare @llvm.riscv.vsll.mask.nxv16i16( , , - i16, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv16i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv32i16.i16( +declare @llvm.riscv.vsll.nxv32i16( , - i16, + i32, i32); -define @intrinsic_vsll_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsll_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv32i16.i16( + %a = call @llvm.riscv.vsll.nxv32i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv32i16.i16( +declare @llvm.riscv.vsll.mask.nxv32i16( , , - i16, + i32, , i32); -define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv32i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsll.nxv1i32.i32( +declare @llvm.riscv.vsll.nxv1i32( , i32, i32); -define @intrinsic_vsll_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsll_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i32.i32( + %a = call @llvm.riscv.vsll.nxv1i32( %0, i32 %1, i32 %2) @@ -1388,21 +1388,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv1i32.i32( +declare @llvm.riscv.vsll.mask.nxv1i32( , , i32, , i32); -define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32: +define 
@intrinsic_vsll_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv1i32( %0, %1, i32 %2, @@ -1412,19 +1412,19 @@ ret %a } -declare @llvm.riscv.vsll.nxv2i32.i32( +declare @llvm.riscv.vsll.nxv2i32( , i32, i32); -define @intrinsic_vsll_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsll_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i32.i32( + %a = call @llvm.riscv.vsll.nxv2i32( %0, i32 %1, i32 %2) @@ -1432,21 +1432,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv2i32.i32( +declare @llvm.riscv.vsll.mask.nxv2i32( , , i32, , i32); -define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv2i32( %0, %1, i32 %2, @@ -1456,19 +1456,19 @@ ret %a } -declare @llvm.riscv.vsll.nxv4i32.i32( +declare @llvm.riscv.vsll.nxv4i32( , i32, i32); -define @intrinsic_vsll_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsll_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i32.i32( + %a = call @llvm.riscv.vsll.nxv4i32( %0, i32 %1, i32 %2) @@ -1476,21 +1476,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv4i32.i32( +declare @llvm.riscv.vsll.mask.nxv4i32( , , i32, , i32); -define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv4i32( %0, %1, i32 %2, @@ -1500,19 +1500,19 @@ ret %a } -declare @llvm.riscv.vsll.nxv8i32.i32( +declare @llvm.riscv.vsll.nxv8i32( , i32, i32); -define @intrinsic_vsll_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsll_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vsll.nxv8i32.i32( + %a = call @llvm.riscv.vsll.nxv8i32( %0, i32 %1, i32 %2) @@ -1520,21 +1520,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv8i32.i32( +declare @llvm.riscv.vsll.mask.nxv8i32( , , i32, , i32); -define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv8i32( %0, %1, i32 %2, @@ -1544,19 +1544,19 @@ ret %a } -declare @llvm.riscv.vsll.nxv16i32.i32( +declare @llvm.riscv.vsll.nxv16i32( , i32, i32); -define @intrinsic_vsll_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsll_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i32.i32( + %a = call @llvm.riscv.vsll.nxv16i32( %0, i32 %1, i32 %2) @@ -1564,21 +1564,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv16i32.i32( +declare @llvm.riscv.vsll.mask.nxv16i32( , , i32, , i32); -define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv16i32( %0, %1, i32 %2, @@ -1595,9 +1595,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i8.i8( + %a = call @llvm.riscv.vsll.nxv1i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1610,10 +1610,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv1i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1627,9 +1627,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i8.i8( + %a = call @llvm.riscv.vsll.nxv2i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1642,10 +1642,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv2i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1659,9 +1659,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i8.i8( + %a = call @llvm.riscv.vsll.nxv4i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1674,10 +1674,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv4i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1691,9 +1691,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i8.i8( + %a = call 
@llvm.riscv.vsll.nxv8i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1706,10 +1706,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv8i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1723,9 +1723,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i8.i8( + %a = call @llvm.riscv.vsll.nxv16i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1738,10 +1738,10 @@ ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv16i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1755,9 +1755,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv32i8.i8( + %a = call @llvm.riscv.vsll.nxv32i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1770,10 +1770,10 @@ ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv32i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1787,9 +1787,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv64i8.i8( + %a = call @llvm.riscv.vsll.nxv64i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1802,10 +1802,10 @@ ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv64i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1819,9 +1819,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i16.i16( + %a = call @llvm.riscv.vsll.nxv1i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1834,10 +1834,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv1i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1851,9 +1851,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i16.i16( + %a = call @llvm.riscv.vsll.nxv2i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1866,10 +1866,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv2i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1883,9 +1883,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i16.i16( + %a = call @llvm.riscv.vsll.nxv4i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1898,10 +1898,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv4i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1915,9 +1915,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i16.i16( + %a = call @llvm.riscv.vsll.nxv8i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1930,10 +1930,10 @@ ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv8i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1947,9 +1947,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i16.i16( + %a = call @llvm.riscv.vsll.nxv16i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ 
-1962,10 +1962,10 @@ ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv16i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1979,9 +1979,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv32i16.i16( + %a = call @llvm.riscv.vsll.nxv32i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1994,10 +1994,10 @@ ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv32i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2011,7 +2011,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i32.i32( + %a = call @llvm.riscv.vsll.nxv1i32( %0, i32 9, i32 %1) @@ -2026,7 +2026,7 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv1i32( %0, %1, i32 9, @@ -2043,7 +2043,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i32.i32( + %a = call @llvm.riscv.vsll.nxv2i32( %0, i32 9, i32 %1) @@ -2058,7 +2058,7 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv2i32( %0, %1, i32 9, @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i32.i32( + %a = call @llvm.riscv.vsll.nxv4i32( %0, i32 9, i32 %1) @@ -2090,7 +2090,7 @@ ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv4i32( %0, %1, i32 9, @@ -2107,7 +2107,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i32.i32( + %a = call @llvm.riscv.vsll.nxv8i32( %0, i32 9, i32 %1) @@ -2122,7 +2122,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv8i32( %0, %1, i32 9, @@ -2139,7 +2139,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i32.i32( + %a = call @llvm.riscv.vsll.nxv16i32( %0, i32 9, i32 %1) @@ -2154,7 +2154,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv16i32( %0, %1, i32 9, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll @@ -973,811 +973,811 @@ ret %a } -declare @llvm.riscv.vsll.nxv1i8.i8( +declare @llvm.riscv.vsll.nxv1i8( , - i8, + i64, i64); -define @intrinsic_vsll_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i8.i8( + %a = call @llvm.riscv.vsll.nxv1i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.i8( +declare @llvm.riscv.vsll.mask.nxv1i8( , , 
- i8, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv1i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv2i8.i8( +declare @llvm.riscv.vsll.nxv2i8( , - i8, + i64, i64); -define @intrinsic_vsll_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i8.i8( + %a = call @llvm.riscv.vsll.nxv2i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv2i8.i8( +declare @llvm.riscv.vsll.mask.nxv2i8( , , - i8, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv2i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv4i8.i8( +declare @llvm.riscv.vsll.nxv4i8( , - i8, + i64, i64); -define @intrinsic_vsll_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i8.i8( + %a = call @llvm.riscv.vsll.nxv4i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv4i8.i8( +declare @llvm.riscv.vsll.mask.nxv4i8( , , - i8, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv4i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv8i8.i8( +declare @llvm.riscv.vsll.nxv8i8( , - i8, + i64, i64); -define @intrinsic_vsll_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; 
CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i8.i8( + %a = call @llvm.riscv.vsll.nxv8i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv8i8.i8( +declare @llvm.riscv.vsll.mask.nxv8i8( , , - i8, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv8i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv16i8.i8( +declare @llvm.riscv.vsll.nxv16i8( , - i8, + i64, i64); -define @intrinsic_vsll_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsll_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i8.i8( + %a = call @llvm.riscv.vsll.nxv16i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv16i8.i8( +declare @llvm.riscv.vsll.mask.nxv16i8( , , - i8, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv16i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv32i8.i8( +declare @llvm.riscv.vsll.nxv32i8( , - i8, + i64, i64); -define @intrinsic_vsll_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsll_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv32i8.i8( + %a = call @llvm.riscv.vsll.nxv32i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv32i8.i8( +declare @llvm.riscv.vsll.mask.nxv32i8( , , - i8, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv32i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv64i8.i8( +declare @llvm.riscv.vsll.nxv64i8( , - i8, + i64, i64); -define 
@intrinsic_vsll_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsll_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv64i8.i8( + %a = call @llvm.riscv.vsll.nxv64i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv64i8.i8( +declare @llvm.riscv.vsll.mask.nxv64i8( , , - i8, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv64i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv1i16.i16( +declare @llvm.riscv.vsll.nxv1i16( , - i16, + i64, i64); -define @intrinsic_vsll_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsll_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i16.i16( + %a = call @llvm.riscv.vsll.nxv1i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv1i16.i16( +declare @llvm.riscv.vsll.mask.nxv1i16( , , - i16, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv1i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv2i16.i16( +declare @llvm.riscv.vsll.nxv2i16( , - i16, + i64, i64); -define @intrinsic_vsll_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsll_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i16.i16( + %a = call @llvm.riscv.vsll.nxv2i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv2i16.i16( +declare @llvm.riscv.vsll.mask.nxv2i16( , , - i16, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv2i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv4i16.i16( +declare @llvm.riscv.vsll.nxv4i16( , - i16, + i64, i64); -define @intrinsic_vsll_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsll_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i16.i16( + %a = call @llvm.riscv.vsll.nxv4i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv4i16.i16( +declare @llvm.riscv.vsll.mask.nxv4i16( , , - i16, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv4i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv8i16.i16( +declare @llvm.riscv.vsll.nxv8i16( , - i16, + i64, i64); -define @intrinsic_vsll_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsll_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i16.i16( + %a = call @llvm.riscv.vsll.nxv8i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv8i16.i16( +declare @llvm.riscv.vsll.mask.nxv8i16( , , - i16, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv8i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv16i16.i16( +declare @llvm.riscv.vsll.nxv16i16( , - i16, + i64, i64); -define @intrinsic_vsll_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsll_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i16.i16( + %a = call @llvm.riscv.vsll.nxv16i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare 
@llvm.riscv.vsll.mask.nxv16i16.i16( +declare @llvm.riscv.vsll.mask.nxv16i16( , , - i16, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv16i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv32i16.i16( +declare @llvm.riscv.vsll.nxv32i16( , - i16, + i64, i64); -define @intrinsic_vsll_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsll_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv32i16.i16( + %a = call @llvm.riscv.vsll.nxv32i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv32i16.i16( +declare @llvm.riscv.vsll.mask.nxv32i16( , , - i16, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv32i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv1i32.i32( +declare @llvm.riscv.vsll.nxv1i32( , - i32, + i64, i64); -define @intrinsic_vsll_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsll_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i32.i32( + %a = call @llvm.riscv.vsll.nxv1i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv1i32.i32( +declare @llvm.riscv.vsll.mask.nxv1i32( , , - i32, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv1i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv2i32.i32( +declare @llvm.riscv.vsll.nxv2i32( , - i32, + i64, i64); -define @intrinsic_vsll_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vsll_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsll_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i32.i32( + %a = call @llvm.riscv.vsll.nxv2i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv2i32.i32( +declare @llvm.riscv.vsll.mask.nxv2i32( , , - i32, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv2i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv4i32.i32( +declare @llvm.riscv.vsll.nxv4i32( , - i32, + i64, i64); -define @intrinsic_vsll_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsll_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i32.i32( + %a = call @llvm.riscv.vsll.nxv4i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv4i32.i32( +declare @llvm.riscv.vsll.mask.nxv4i32( , , - i32, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv4i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv8i32.i32( +declare @llvm.riscv.vsll.nxv8i32( , - i32, + i64, i64); -define @intrinsic_vsll_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsll_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i32.i32( + %a = call @llvm.riscv.vsll.nxv8i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv8i32.i32( +declare @llvm.riscv.vsll.mask.nxv8i32( , , - i32, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vsll.vx v8, v12, 
a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv8i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv16i32.i32( +declare @llvm.riscv.vsll.nxv16i32( , - i32, + i64, i64); -define @intrinsic_vsll_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsll_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i32.i32( + %a = call @llvm.riscv.vsll.nxv16i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsll.mask.nxv16i32.i32( +declare @llvm.riscv.vsll.mask.nxv16i32( , , - i32, + i64, , i64); -define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv16i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsll.nxv1i64.i64( +declare @llvm.riscv.vsll.nxv1i64( , i64, i64); -define @intrinsic_vsll_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vsll_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i64.i64( + %a = call @llvm.riscv.vsll.nxv1i64( %0, i64 %1, i64 %2) @@ -1785,21 +1785,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv1i64.i64( +declare @llvm.riscv.vsll.mask.nxv1i64( , , i64, , i64); -define @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i64.i64( + %a = call @llvm.riscv.vsll.mask.nxv1i64( %0, %1, i64 %2, @@ -1809,19 +1809,19 @@ ret %a } -declare @llvm.riscv.vsll.nxv2i64.i64( +declare @llvm.riscv.vsll.nxv2i64( , i64, i64); -define @intrinsic_vsll_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vsll_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i64.i64( + %a = call @llvm.riscv.vsll.nxv2i64( %0, i64 %1, i64 %2) @@ -1829,21 +1829,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv2i64.i64( +declare @llvm.riscv.vsll.mask.nxv2i64( , , i64, , i64); -define 
@intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i64.i64( + %a = call @llvm.riscv.vsll.mask.nxv2i64( %0, %1, i64 %2, @@ -1853,19 +1853,19 @@ ret %a } -declare @llvm.riscv.vsll.nxv4i64.i64( +declare @llvm.riscv.vsll.nxv4i64( , i64, i64); -define @intrinsic_vsll_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vsll_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i64.i64( + %a = call @llvm.riscv.vsll.nxv4i64( %0, i64 %1, i64 %2) @@ -1873,21 +1873,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv4i64.i64( +declare @llvm.riscv.vsll.mask.nxv4i64( , , i64, , i64); -define @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i64.i64( + %a = call @llvm.riscv.vsll.mask.nxv4i64( %0, %1, i64 %2, @@ -1897,19 +1897,19 @@ ret %a } -declare @llvm.riscv.vsll.nxv8i64.i64( +declare @llvm.riscv.vsll.nxv8i64( , i64, i64); -define @intrinsic_vsll_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vsll_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i64.i64( + %a = call @llvm.riscv.vsll.nxv8i64( %0, i64 %1, i64 %2) @@ -1917,21 +1917,21 @@ ret %a } -declare @llvm.riscv.vsll.mask.nxv8i64.i64( +declare @llvm.riscv.vsll.mask.nxv8i64( , , i64, , i64); -define @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i64.i64( + %a = call @llvm.riscv.vsll.mask.nxv8i64( %0, %1, i64 %2, @@ -1948,9 +1948,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i8.i8( + %a = call @llvm.riscv.vsll.nxv1i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1963,10 +1963,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv1i8( %0, %1, - 
i8 9, + i64 9, %2, i64 %3) @@ -1980,9 +1980,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i8.i8( + %a = call @llvm.riscv.vsll.nxv2i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1995,10 +1995,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv2i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2012,9 +2012,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i8.i8( + %a = call @llvm.riscv.vsll.nxv4i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2027,10 +2027,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv4i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2044,9 +2044,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i8.i8( + %a = call @llvm.riscv.vsll.nxv8i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2059,10 +2059,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv8i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2076,9 +2076,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i8.i8( + %a = call @llvm.riscv.vsll.nxv16i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2091,10 +2091,10 @@ ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv16i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2108,9 +2108,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv32i8.i8( + %a = call @llvm.riscv.vsll.nxv32i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2123,10 +2123,10 @@ ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv32i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2140,9 +2140,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv64i8.i8( + %a = call @llvm.riscv.vsll.nxv64i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2155,10 +2155,10 @@ ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsll.mask.nxv64i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2172,9 +2172,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i16.i16( + %a = call @llvm.riscv.vsll.nxv1i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2187,10 +2187,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv1i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2204,9 +2204,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i16.i16( + %a = call @llvm.riscv.vsll.nxv2i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2219,10 +2219,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv2i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2236,9 +2236,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; 
CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i16.i16( + %a = call @llvm.riscv.vsll.nxv4i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2251,10 +2251,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv4i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2268,9 +2268,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i16.i16( + %a = call @llvm.riscv.vsll.nxv8i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2283,10 +2283,10 @@ ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv8i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2300,9 +2300,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv16i16.i16( + %a = call @llvm.riscv.vsll.nxv16i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2315,10 +2315,10 @@ ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv16i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2332,9 +2332,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv32i16.i16( + %a = call @llvm.riscv.vsll.nxv32i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2347,10 +2347,10 @@ ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsll.mask.nxv32i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2364,9 +2364,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i32.i32( + %a = call @llvm.riscv.vsll.nxv1i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2379,10 +2379,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv1i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2396,9 +2396,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i32.i32( + %a = call @llvm.riscv.vsll.nxv2i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2411,10 +2411,10 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv2i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2428,9 +2428,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i32.i32( + %a = call @llvm.riscv.vsll.nxv4i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2443,10 +2443,10 @@ ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv4i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2460,9 +2460,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i32.i32( + %a = call @llvm.riscv.vsll.nxv8i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2475,10 +2475,10 @@ ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv8i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2492,9 +2492,9 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = 
call @llvm.riscv.vsll.nxv16i32.i32( + %a = call @llvm.riscv.vsll.nxv16i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2507,10 +2507,10 @@ ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsll.mask.nxv16i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2524,7 +2524,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv1i64.i64( + %a = call @llvm.riscv.vsll.nxv1i64( %0, i64 9, i64 %1) @@ -2539,7 +2539,7 @@ ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv1i64.i64( + %a = call @llvm.riscv.vsll.mask.nxv1i64( %0, %1, i64 9, @@ -2556,7 +2556,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv2i64.i64( + %a = call @llvm.riscv.vsll.nxv2i64( %0, i64 9, i64 %1) @@ -2571,7 +2571,7 @@ ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv2i64.i64( + %a = call @llvm.riscv.vsll.mask.nxv2i64( %0, %1, i64 9, @@ -2588,7 +2588,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv4i64.i64( + %a = call @llvm.riscv.vsll.nxv4i64( %0, i64 9, i64 %1) @@ -2603,7 +2603,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv4i64.i64( + %a = call @llvm.riscv.vsll.mask.nxv4i64( %0, %1, i64 9, @@ -2620,7 +2620,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.nxv8i64.i64( + %a = call @llvm.riscv.vsll.nxv8i64( %0, i64 9, i64 %1) @@ -2635,7 +2635,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsll.mask.nxv8i64.i64( + %a = call @llvm.riscv.vsll.mask.nxv8i64( %0, %1, i64 9, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll @@ -796,591 +796,591 @@ ret %a } -declare @llvm.riscv.vsra.nxv1i8.i8( +declare @llvm.riscv.vsra.nxv1i8( , - i8, + i32, i32); -define @intrinsic_vsra_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsra_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i8.i8( + %a = call @llvm.riscv.vsra.nxv1i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv1i8.i8( +declare @llvm.riscv.vsra.mask.nxv1i8( , , - i8, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv1i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv2i8.i8( +declare @llvm.riscv.vsra.nxv2i8( , - i8, + i32, i32); -define @intrinsic_vsra_vx_nxv2i8_nxv2i8_i8( 
%0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsra_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i8.i8( + %a = call @llvm.riscv.vsra.nxv2i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv2i8.i8( +declare @llvm.riscv.vsra.mask.nxv2i8( , , - i8, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv2i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv4i8.i8( +declare @llvm.riscv.vsra.nxv4i8( , - i8, + i32, i32); -define @intrinsic_vsra_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsra_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i8.i8( + %a = call @llvm.riscv.vsra.nxv4i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv4i8.i8( +declare @llvm.riscv.vsra.mask.nxv4i8( , , - i8, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv4i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv8i8.i8( +declare @llvm.riscv.vsra.nxv8i8( , - i8, + i32, i32); -define @intrinsic_vsra_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i8.i8( + %a = call @llvm.riscv.vsra.nxv8i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv8i8.i8( +declare @llvm.riscv.vsra.mask.nxv8i8( , , - i8, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vsra.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv8i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv16i8.i8( +declare @llvm.riscv.vsra.nxv16i8( , - i8, + i32, i32); -define @intrinsic_vsra_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsra_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i8.i8( + %a = call @llvm.riscv.vsra.nxv16i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv16i8.i8( +declare @llvm.riscv.vsra.mask.nxv16i8( , , - i8, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv16i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv32i8.i8( +declare @llvm.riscv.vsra.nxv32i8( , - i8, + i32, i32); -define @intrinsic_vsra_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsra_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv32i8.i8( + %a = call @llvm.riscv.vsra.nxv32i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv32i8.i8( +declare @llvm.riscv.vsra.mask.nxv32i8( , , - i8, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv32i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv64i8.i8( +declare @llvm.riscv.vsra.nxv64i8( , - i8, + i32, i32); -define @intrinsic_vsra_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsra_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv64i8.i8( + %a = call @llvm.riscv.vsra.nxv64i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv64i8.i8( +declare @llvm.riscv.vsra.mask.nxv64i8( , , - i8, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv64i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv1i16.i16( +declare @llvm.riscv.vsra.nxv1i16( , - i16, + i32, i32); -define @intrinsic_vsra_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsra_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i16.i16( + %a = call @llvm.riscv.vsra.nxv1i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv1i16.i16( +declare @llvm.riscv.vsra.mask.nxv1i16( , , - i16, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv1i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv2i16.i16( +declare @llvm.riscv.vsra.nxv2i16( , - i16, + i32, i32); -define @intrinsic_vsra_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsra_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i16.i16( + %a = call @llvm.riscv.vsra.nxv2i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv2i16.i16( +declare @llvm.riscv.vsra.mask.nxv2i16( , , - i16, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv2i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv4i16.i16( +declare @llvm.riscv.vsra.nxv4i16( , - i16, + i32, i32); -define @intrinsic_vsra_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsra_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 
; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i16.i16( + %a = call @llvm.riscv.vsra.nxv4i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv4i16.i16( +declare @llvm.riscv.vsra.mask.nxv4i16( , , - i16, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv4i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv8i16.i16( +declare @llvm.riscv.vsra.nxv8i16( , - i16, + i32, i32); -define @intrinsic_vsra_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsra_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i16.i16( + %a = call @llvm.riscv.vsra.nxv8i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv8i16.i16( +declare @llvm.riscv.vsra.mask.nxv8i16( , , - i16, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv8i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv16i16.i16( +declare @llvm.riscv.vsra.nxv16i16( , - i16, + i32, i32); -define @intrinsic_vsra_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsra_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i16.i16( + %a = call @llvm.riscv.vsra.nxv16i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv16i16.i16( +declare @llvm.riscv.vsra.mask.nxv16i16( , , - i16, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv16i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv32i16.i16( +declare 
@llvm.riscv.vsra.nxv32i16( , - i16, + i32, i32); -define @intrinsic_vsra_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsra_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv32i16.i16( + %a = call @llvm.riscv.vsra.nxv32i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv32i16.i16( +declare @llvm.riscv.vsra.mask.nxv32i16( , , - i16, + i32, , i32); -define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv32i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsra.nxv1i32.i32( +declare @llvm.riscv.vsra.nxv1i32( , i32, i32); -define @intrinsic_vsra_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsra_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i32.i32( + %a = call @llvm.riscv.vsra.nxv1i32( %0, i32 %1, i32 %2) @@ -1388,21 +1388,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv1i32.i32( +declare @llvm.riscv.vsra.mask.nxv1i32( , , i32, , i32); -define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv1i32( %0, %1, i32 %2, @@ -1412,19 +1412,19 @@ ret %a } -declare @llvm.riscv.vsra.nxv2i32.i32( +declare @llvm.riscv.vsra.nxv2i32( , i32, i32); -define @intrinsic_vsra_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsra_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i32.i32( + %a = call @llvm.riscv.vsra.nxv2i32( %0, i32 %1, i32 %2) @@ -1432,21 +1432,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv2i32.i32( +declare @llvm.riscv.vsra.mask.nxv2i32( , , i32, , i32); -define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { 
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv2i32( %0, %1, i32 %2, @@ -1456,19 +1456,19 @@ ret %a } -declare @llvm.riscv.vsra.nxv4i32.i32( +declare @llvm.riscv.vsra.nxv4i32( , i32, i32); -define @intrinsic_vsra_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsra_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i32.i32( + %a = call @llvm.riscv.vsra.nxv4i32( %0, i32 %1, i32 %2) @@ -1476,21 +1476,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv4i32.i32( +declare @llvm.riscv.vsra.mask.nxv4i32( , , i32, , i32); -define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv4i32( %0, %1, i32 %2, @@ -1500,19 +1500,19 @@ ret %a } -declare @llvm.riscv.vsra.nxv8i32.i32( +declare @llvm.riscv.vsra.nxv8i32( , i32, i32); -define @intrinsic_vsra_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsra_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i32.i32( + %a = call @llvm.riscv.vsra.nxv8i32( %0, i32 %1, i32 %2) @@ -1520,21 +1520,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv8i32.i32( +declare @llvm.riscv.vsra.mask.nxv8i32( , , i32, , i32); -define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv8i32( %0, %1, i32 %2, @@ -1544,19 +1544,19 @@ ret %a } -declare @llvm.riscv.vsra.nxv16i32.i32( +declare @llvm.riscv.vsra.nxv16i32( , i32, i32); -define @intrinsic_vsra_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsra_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i32.i32( + %a = call @llvm.riscv.vsra.nxv16i32( %0, i32 %1, i32 %2) @@ 
-1564,21 +1564,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv16i32.i32( +declare @llvm.riscv.vsra.mask.nxv16i32( , , i32, , i32); -define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv16i32( %0, %1, i32 %2, @@ -1595,9 +1595,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i8.i8( + %a = call @llvm.riscv.vsra.nxv1i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1610,10 +1610,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv1i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1627,9 +1627,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i8.i8( + %a = call @llvm.riscv.vsra.nxv2i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1642,10 +1642,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv2i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1659,9 +1659,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i8.i8( + %a = call @llvm.riscv.vsra.nxv4i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1674,10 +1674,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv4i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1691,9 +1691,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i8.i8( + %a = call @llvm.riscv.vsra.nxv8i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1706,10 +1706,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv8i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1723,9 +1723,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i8.i8( + %a = call @llvm.riscv.vsra.nxv16i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1738,10 +1738,10 @@ ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv16i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1755,9 +1755,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv32i8.i8( + %a = call @llvm.riscv.vsra.nxv32i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1770,10 +1770,10 @@ ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv32i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1787,9 +1787,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv64i8.i8( + %a = call @llvm.riscv.vsra.nxv64i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1802,10 +1802,10 @@ ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; 
CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv64i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1819,9 +1819,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i16.i16( + %a = call @llvm.riscv.vsra.nxv1i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1834,10 +1834,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv1i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1851,9 +1851,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i16.i16( + %a = call @llvm.riscv.vsra.nxv2i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1866,10 +1866,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv2i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1883,9 +1883,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i16.i16( + %a = call @llvm.riscv.vsra.nxv4i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1898,10 +1898,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv4i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1915,9 +1915,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i16.i16( + %a = call @llvm.riscv.vsra.nxv8i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1930,10 +1930,10 @@ ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv8i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1947,9 +1947,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i16.i16( + %a = call @llvm.riscv.vsra.nxv16i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1962,10 +1962,10 @@ ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv16i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1979,9 +1979,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv32i16.i16( + %a = call @llvm.riscv.vsra.nxv32i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1994,10 +1994,10 @@ ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv32i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2011,7 +2011,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i32.i32( + %a = call @llvm.riscv.vsra.nxv1i32( %0, i32 9, i32 %1) @@ -2026,7 +2026,7 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv1i32( %0, %1, i32 9, @@ -2043,7 +2043,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i32.i32( + %a = call @llvm.riscv.vsra.nxv2i32( %0, i32 9, i32 %1) @@ -2058,7 +2058,7 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i32.i32( + %a = call 
@llvm.riscv.vsra.mask.nxv2i32( %0, %1, i32 9, @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i32.i32( + %a = call @llvm.riscv.vsra.nxv4i32( %0, i32 9, i32 %1) @@ -2090,7 +2090,7 @@ ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv4i32( %0, %1, i32 9, @@ -2107,7 +2107,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i32.i32( + %a = call @llvm.riscv.vsra.nxv8i32( %0, i32 9, i32 %1) @@ -2122,7 +2122,7 @@ ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv8i32( %0, %1, i32 9, @@ -2139,7 +2139,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i32.i32( + %a = call @llvm.riscv.vsra.nxv16i32( %0, i32 9, i32 %1) @@ -2154,7 +2154,7 @@ ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv16i32( %0, %1, i32 9, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll @@ -973,811 +973,811 @@ ret %a } -declare @llvm.riscv.vsra.nxv1i8.i8( +declare @llvm.riscv.vsra.nxv1i8( , - i8, + i64, i64); -define @intrinsic_vsra_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsra_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i8.i8( + %a = call @llvm.riscv.vsra.nxv1i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv1i8.i8( +declare @llvm.riscv.vsra.mask.nxv1i8( , , - i8, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv1i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv2i8.i8( +declare @llvm.riscv.vsra.nxv2i8( , - i8, + i64, i64); -define @intrinsic_vsra_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsra_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i8.i8( + %a = call @llvm.riscv.vsra.nxv2i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv2i8.i8( +declare @llvm.riscv.vsra.mask.nxv2i8( , , - i8, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv2i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv4i8.i8( +declare @llvm.riscv.vsra.nxv4i8( , - i8, + i64, i64); -define @intrinsic_vsra_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsra_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i8.i8( + %a = call @llvm.riscv.vsra.nxv4i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv4i8.i8( +declare @llvm.riscv.vsra.mask.nxv4i8( , , - i8, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv4i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv8i8.i8( +declare @llvm.riscv.vsra.nxv8i8( , - i8, + i64, i64); -define @intrinsic_vsra_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i8.i8( + %a = call @llvm.riscv.vsra.nxv8i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv8i8.i8( +declare @llvm.riscv.vsra.mask.nxv8i8( , , - i8, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv8i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv16i8.i8( +declare @llvm.riscv.vsra.nxv16i8( , - i8, + i64, i64); -define @intrinsic_vsra_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsra_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i8.i8( + %a = call 
@llvm.riscv.vsra.nxv16i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv16i8.i8( +declare @llvm.riscv.vsra.mask.nxv16i8( , , - i8, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv16i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv32i8.i8( +declare @llvm.riscv.vsra.nxv32i8( , - i8, + i64, i64); -define @intrinsic_vsra_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsra_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv32i8.i8( + %a = call @llvm.riscv.vsra.nxv32i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv32i8.i8( +declare @llvm.riscv.vsra.mask.nxv32i8( , , - i8, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv32i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv64i8.i8( +declare @llvm.riscv.vsra.nxv64i8( , - i8, + i64, i64); -define @intrinsic_vsra_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsra_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv64i8.i8( + %a = call @llvm.riscv.vsra.nxv64i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv64i8.i8( +declare @llvm.riscv.vsra.mask.nxv64i8( , , - i8, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv64i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv1i16.i16( +declare @llvm.riscv.vsra.nxv1i16( , - i16, + i64, i64); -define @intrinsic_vsra_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vsra_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsra_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i16.i16( + %a = call @llvm.riscv.vsra.nxv1i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv1i16.i16( +declare @llvm.riscv.vsra.mask.nxv1i16( , , - i16, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv1i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv2i16.i16( +declare @llvm.riscv.vsra.nxv2i16( , - i16, + i64, i64); -define @intrinsic_vsra_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsra_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i16.i16( + %a = call @llvm.riscv.vsra.nxv2i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv2i16.i16( +declare @llvm.riscv.vsra.mask.nxv2i16( , , - i16, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv2i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv4i16.i16( +declare @llvm.riscv.vsra.nxv4i16( , - i16, + i64, i64); -define @intrinsic_vsra_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsra_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i16.i16( + %a = call @llvm.riscv.vsra.nxv4i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv4i16.i16( +declare @llvm.riscv.vsra.mask.nxv4i16( , , - i16, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vsra.vx v8, 
v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv4i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv8i16.i16( +declare @llvm.riscv.vsra.nxv8i16( , - i16, + i64, i64); -define @intrinsic_vsra_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsra_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i16.i16( + %a = call @llvm.riscv.vsra.nxv8i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv8i16.i16( +declare @llvm.riscv.vsra.mask.nxv8i16( , , - i16, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv8i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv16i16.i16( +declare @llvm.riscv.vsra.nxv16i16( , - i16, + i64, i64); -define @intrinsic_vsra_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsra_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i16.i16( + %a = call @llvm.riscv.vsra.nxv16i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv16i16.i16( +declare @llvm.riscv.vsra.mask.nxv16i16( , , - i16, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv16i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv32i16.i16( +declare @llvm.riscv.vsra.nxv32i16( , - i16, + i64, i64); -define @intrinsic_vsra_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsra_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv32i16.i16( + %a = call @llvm.riscv.vsra.nxv32i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv32i16.i16( +declare @llvm.riscv.vsra.mask.nxv32i16( , 
, - i16, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv32i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv1i32.i32( +declare @llvm.riscv.vsra.nxv1i32( , - i32, + i64, i64); -define @intrinsic_vsra_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsra_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i32.i32( + %a = call @llvm.riscv.vsra.nxv1i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv1i32.i32( +declare @llvm.riscv.vsra.mask.nxv1i32( , , - i32, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv1i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv2i32.i32( +declare @llvm.riscv.vsra.nxv2i32( , - i32, + i64, i64); -define @intrinsic_vsra_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsra_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i32.i32( + %a = call @llvm.riscv.vsra.nxv2i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv2i32.i32( +declare @llvm.riscv.vsra.mask.nxv2i32( , , - i32, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv2i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv4i32.i32( +declare @llvm.riscv.vsra.nxv4i32( , - i32, + i64, i64); -define @intrinsic_vsra_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsra_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; 
CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i32.i32( + %a = call @llvm.riscv.vsra.nxv4i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv4i32.i32( +declare @llvm.riscv.vsra.mask.nxv4i32( , , - i32, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv4i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv8i32.i32( +declare @llvm.riscv.vsra.nxv8i32( , - i32, + i64, i64); -define @intrinsic_vsra_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsra_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i32.i32( + %a = call @llvm.riscv.vsra.nxv8i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv8i32.i32( +declare @llvm.riscv.vsra.mask.nxv8i32( , , - i32, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv8i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv16i32.i32( +declare @llvm.riscv.vsra.nxv16i32( , - i32, + i64, i64); -define @intrinsic_vsra_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsra_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i32.i32( + %a = call @llvm.riscv.vsra.nxv16i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsra.mask.nxv16i32.i32( +declare @llvm.riscv.vsra.mask.nxv16i32( , , - i32, + i64, , i64); -define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vsra.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv16i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsra.nxv1i64.i64( +declare @llvm.riscv.vsra.nxv1i64( , i64, i64); -define @intrinsic_vsra_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vsra_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i64.i64( + %a = call @llvm.riscv.vsra.nxv1i64( %0, i64 %1, i64 %2) @@ -1785,21 +1785,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv1i64.i64( +declare @llvm.riscv.vsra.mask.nxv1i64( , , i64, , i64); -define @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i64.i64( + %a = call @llvm.riscv.vsra.mask.nxv1i64( %0, %1, i64 %2, @@ -1809,19 +1809,19 @@ ret %a } -declare @llvm.riscv.vsra.nxv2i64.i64( +declare @llvm.riscv.vsra.nxv2i64( , i64, i64); -define @intrinsic_vsra_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vsra_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i64.i64( + %a = call @llvm.riscv.vsra.nxv2i64( %0, i64 %1, i64 %2) @@ -1829,21 +1829,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv2i64.i64( +declare @llvm.riscv.vsra.mask.nxv2i64( , , i64, , i64); -define @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i64.i64( + %a = call @llvm.riscv.vsra.mask.nxv2i64( %0, %1, i64 %2, @@ -1853,19 +1853,19 @@ ret %a } -declare @llvm.riscv.vsra.nxv4i64.i64( +declare @llvm.riscv.vsra.nxv4i64( , i64, i64); -define @intrinsic_vsra_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vsra_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i64.i64( + %a = call @llvm.riscv.vsra.nxv4i64( %0, i64 %1, i64 %2) @@ -1873,21 +1873,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv4i64.i64( +declare @llvm.riscv.vsra.mask.nxv4i64( , , i64, , i64); -define @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i64.i64( + %a = call @llvm.riscv.vsra.mask.nxv4i64( %0, %1, i64 %2, @@ -1897,19 +1897,19 @@ ret %a } -declare @llvm.riscv.vsra.nxv8i64.i64( +declare @llvm.riscv.vsra.nxv8i64( , i64, i64); -define @intrinsic_vsra_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vsra_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i64.i64( + %a = call @llvm.riscv.vsra.nxv8i64( %0, i64 %1, i64 %2) @@ -1917,21 +1917,21 @@ ret %a } -declare @llvm.riscv.vsra.mask.nxv8i64.i64( +declare @llvm.riscv.vsra.mask.nxv8i64( , , i64, , i64); -define @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i64.i64( + %a = call @llvm.riscv.vsra.mask.nxv8i64( %0, %1, i64 %2, @@ -1948,9 +1948,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i8.i8( + %a = call @llvm.riscv.vsra.nxv1i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1963,10 +1963,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv1i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -1980,9 +1980,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i8.i8( + %a = call @llvm.riscv.vsra.nxv2i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1995,10 +1995,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv2i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2012,9 +2012,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i8.i8( + %a = call @llvm.riscv.vsra.nxv4i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2027,10 +2027,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv4i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2044,9 +2044,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i8.i8( + %a = call @llvm.riscv.vsra.nxv8i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2059,10 +2059,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv8i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2076,9 +2076,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 
0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i8.i8( + %a = call @llvm.riscv.vsra.nxv16i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2091,10 +2091,10 @@ ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv16i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2108,9 +2108,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv32i8.i8( + %a = call @llvm.riscv.vsra.nxv32i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2123,10 +2123,10 @@ ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv32i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2140,9 +2140,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv64i8.i8( + %a = call @llvm.riscv.vsra.nxv64i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2155,10 +2155,10 @@ ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsra.mask.nxv64i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2172,9 +2172,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i16.i16( + %a = call @llvm.riscv.vsra.nxv1i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2187,10 +2187,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv1i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2204,9 +2204,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i16.i16( + %a = call @llvm.riscv.vsra.nxv2i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2219,10 +2219,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv2i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2236,9 +2236,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i16.i16( + %a = call @llvm.riscv.vsra.nxv4i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2251,10 +2251,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv4i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2268,9 +2268,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i16.i16( + %a = call @llvm.riscv.vsra.nxv8i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2283,10 +2283,10 @@ ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv8i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2300,9 +2300,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i16.i16( + %a = call @llvm.riscv.vsra.nxv16i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2315,10 +2315,10 @@ ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv16i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2332,9 +2332,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv32i16.i16( + %a 
= call @llvm.riscv.vsra.nxv32i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2347,10 +2347,10 @@ ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsra.mask.nxv32i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2364,9 +2364,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i32.i32( + %a = call @llvm.riscv.vsra.nxv1i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2379,10 +2379,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv1i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2396,9 +2396,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i32.i32( + %a = call @llvm.riscv.vsra.nxv2i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2411,10 +2411,10 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv2i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2428,9 +2428,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i32.i32( + %a = call @llvm.riscv.vsra.nxv4i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2443,10 +2443,10 @@ ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv4i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2460,9 +2460,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i32.i32( + %a = call @llvm.riscv.vsra.nxv8i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2475,10 +2475,10 @@ ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv8i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2492,9 +2492,9 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv16i32.i32( + %a = call @llvm.riscv.vsra.nxv16i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2507,10 +2507,10 @@ ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsra.mask.nxv16i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2524,7 +2524,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv1i64.i64( + %a = call @llvm.riscv.vsra.nxv1i64( %0, i64 9, i64 %1) @@ -2539,7 +2539,7 @@ ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv1i64.i64( + %a = call @llvm.riscv.vsra.mask.nxv1i64( %0, %1, i64 9, @@ -2556,7 +2556,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv2i64.i64( + %a = call @llvm.riscv.vsra.nxv2i64( %0, i64 9, i64 %1) @@ -2571,7 +2571,7 @@ ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv2i64.i64( + %a = call @llvm.riscv.vsra.mask.nxv2i64( %0, %1, i64 9, @@ -2588,7 +2588,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv4i64.i64( + %a = call @llvm.riscv.vsra.nxv4i64( %0, i64 9, i64 %1) @@ -2603,7 +2603,7 @@ ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr 
zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv4i64.i64( + %a = call @llvm.riscv.vsra.mask.nxv4i64( %0, %1, i64 9, @@ -2620,7 +2620,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.nxv8i64.i64( + %a = call @llvm.riscv.vsra.nxv8i64( %0, i64 9, i64 %1) @@ -2635,7 +2635,7 @@ ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsra.mask.nxv8i64.i64( + %a = call @llvm.riscv.vsra.mask.nxv8i64( %0, %1, i64 9, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll @@ -796,591 +796,591 @@ ret %a } -declare @llvm.riscv.vsrl.nxv1i8.i8( +declare @llvm.riscv.vsrl.nxv1i8( , - i8, + i32, i32); -define @intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsrl_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i8.i8( + %a = call @llvm.riscv.vsrl.nxv1i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i8.i8( +declare @llvm.riscv.vsrl.mask.nxv1i8( , , - i8, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv1i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv2i8.i8( +declare @llvm.riscv.vsrl.nxv2i8( , - i8, + i32, i32); -define @intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsrl_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i8.i8( + %a = call @llvm.riscv.vsrl.nxv2i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i8.i8( +declare @llvm.riscv.vsrl.mask.nxv2i8( , , - i8, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv2i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv4i8.i8( +declare @llvm.riscv.vsrl.nxv4i8( , - i8, + i32, i32); -define @intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsrl_vx_nxv4i8_nxv4i8( %0, 
i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i8.i8( + %a = call @llvm.riscv.vsrl.nxv4i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i8.i8( +declare @llvm.riscv.vsrl.mask.nxv4i8( , , - i8, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv4i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv8i8.i8( +declare @llvm.riscv.vsrl.nxv8i8( , - i8, + i32, i32); -define @intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsrl_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i8.i8( + %a = call @llvm.riscv.vsrl.nxv8i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i8.i8( +declare @llvm.riscv.vsrl.mask.nxv8i8( , , - i8, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv8i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv16i8.i8( +declare @llvm.riscv.vsrl.nxv16i8( , - i8, + i32, i32); -define @intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsrl_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i8.i8( + %a = call @llvm.riscv.vsrl.nxv16i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i8.i8( +declare @llvm.riscv.vsrl.mask.nxv16i8( , , - i8, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv16i8( %0, %1, - i8 %2, + i32 %2, %3, 
i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv32i8.i8( +declare @llvm.riscv.vsrl.nxv32i8( , - i8, + i32, i32); -define @intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsrl_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv32i8.i8( + %a = call @llvm.riscv.vsrl.nxv32i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i8.i8( +declare @llvm.riscv.vsrl.mask.nxv32i8( , , - i8, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv32i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv64i8.i8( +declare @llvm.riscv.vsrl.nxv64i8( , - i8, + i32, i32); -define @intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsrl_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv64i8.i8( + %a = call @llvm.riscv.vsrl.nxv64i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv64i8.i8( +declare @llvm.riscv.vsrl.mask.nxv64i8( , , - i8, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv64i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv1i16.i16( +declare @llvm.riscv.vsrl.nxv1i16( , - i16, + i32, i32); -define @intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsrl_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i16.i16( + %a = call @llvm.riscv.vsrl.nxv1i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i16.i16( +declare @llvm.riscv.vsrl.mask.nxv1i16( , , - i16, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, 
%3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv1i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv2i16.i16( +declare @llvm.riscv.vsrl.nxv2i16( , - i16, + i32, i32); -define @intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsrl_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i16.i16( + %a = call @llvm.riscv.vsrl.nxv2i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i16.i16( +declare @llvm.riscv.vsrl.mask.nxv2i16( , , - i16, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv2i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv4i16.i16( +declare @llvm.riscv.vsrl.nxv4i16( , - i16, + i32, i32); -define @intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsrl_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i16.i16( + %a = call @llvm.riscv.vsrl.nxv4i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i16.i16( +declare @llvm.riscv.vsrl.mask.nxv4i16( , , - i16, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv4i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv8i16.i16( +declare @llvm.riscv.vsrl.nxv8i16( , - i16, + i32, i32); -define @intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsrl_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i16.i16( + %a = call 
@llvm.riscv.vsrl.nxv8i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i16.i16( +declare @llvm.riscv.vsrl.mask.nxv8i16( , , - i16, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv8i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv16i16.i16( +declare @llvm.riscv.vsrl.nxv16i16( , - i16, + i32, i32); -define @intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsrl_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i16.i16( + %a = call @llvm.riscv.vsrl.nxv16i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i16.i16( +declare @llvm.riscv.vsrl.mask.nxv16i16( , , - i16, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv16i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv32i16.i16( +declare @llvm.riscv.vsrl.nxv32i16( , - i16, + i32, i32); -define @intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsrl_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv32i16.i16( + %a = call @llvm.riscv.vsrl.nxv32i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i16.i16( +declare @llvm.riscv.vsrl.mask.nxv32i16( , , - i16, + i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv32i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vsrl.nxv1i32.i32( +declare @llvm.riscv.vsrl.nxv1i32( , i32, i32); -define 
@intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsrl_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i32.i32( + %a = call @llvm.riscv.vsrl.nxv1i32( %0, i32 %1, i32 %2) @@ -1388,21 +1388,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i32.i32( +declare @llvm.riscv.vsrl.mask.nxv1i32( , , i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv1i32( %0, %1, i32 %2, @@ -1412,19 +1412,19 @@ ret %a } -declare @llvm.riscv.vsrl.nxv2i32.i32( +declare @llvm.riscv.vsrl.nxv2i32( , i32, i32); -define @intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsrl_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i32.i32( + %a = call @llvm.riscv.vsrl.nxv2i32( %0, i32 %1, i32 %2) @@ -1432,21 +1432,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i32.i32( +declare @llvm.riscv.vsrl.mask.nxv2i32( , , i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv2i32( %0, %1, i32 %2, @@ -1456,19 +1456,19 @@ ret %a } -declare @llvm.riscv.vsrl.nxv4i32.i32( +declare @llvm.riscv.vsrl.nxv4i32( , i32, i32); -define @intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsrl_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i32.i32( + %a = call @llvm.riscv.vsrl.nxv4i32( %0, i32 %1, i32 %2) @@ -1476,21 +1476,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i32.i32( +declare @llvm.riscv.vsrl.mask.nxv4i32( , , i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv4i32( %0, %1, i32 %2, @@ -1500,19 +1500,19 @@ ret %a } -declare @llvm.riscv.vsrl.nxv8i32.i32( +declare @llvm.riscv.vsrl.nxv8i32( , i32, i32); -define @intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsrl_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i32.i32( + %a = call @llvm.riscv.vsrl.nxv8i32( %0, i32 %1, i32 %2) @@ -1520,21 +1520,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i32.i32( +declare @llvm.riscv.vsrl.mask.nxv8i32( , , i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv8i32( %0, %1, i32 %2, @@ -1544,19 +1544,19 @@ ret %a } -declare @llvm.riscv.vsrl.nxv16i32.i32( +declare @llvm.riscv.vsrl.nxv16i32( , i32, i32); -define @intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsrl_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i32.i32( + %a = call @llvm.riscv.vsrl.nxv16i32( %0, i32 %1, i32 %2) @@ -1564,21 +1564,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i32.i32( +declare @llvm.riscv.vsrl.mask.nxv16i32( , , i32, , i32); -define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv16i32( %0, %1, i32 %2, @@ -1595,9 +1595,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i8.i8( + %a = call @llvm.riscv.vsrl.nxv1i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1610,10 +1610,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv1i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1627,9 +1627,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i8.i8( + %a = call @llvm.riscv.vsrl.nxv2i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1642,10 +1642,10 @@ ; CHECK-NEXT: vsrl.vi v8, 
v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv2i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1659,9 +1659,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i8.i8( + %a = call @llvm.riscv.vsrl.nxv4i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1674,10 +1674,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv4i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1691,9 +1691,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i8.i8( + %a = call @llvm.riscv.vsrl.nxv8i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1706,10 +1706,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv8i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1723,9 +1723,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i8.i8( + %a = call @llvm.riscv.vsrl.nxv16i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1738,10 +1738,10 @@ ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv16i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1755,9 +1755,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv32i8.i8( + %a = call @llvm.riscv.vsrl.nxv32i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1770,10 +1770,10 @@ ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv32i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1787,9 +1787,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv64i8.i8( + %a = call @llvm.riscv.vsrl.nxv64i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -1802,10 +1802,10 @@ ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv64i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -1819,9 +1819,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i16.i16( + %a = call @llvm.riscv.vsrl.nxv1i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1834,10 +1834,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv1i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1851,9 +1851,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i16.i16( + %a = call @llvm.riscv.vsrl.nxv2i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1866,10 +1866,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv2i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1883,9 +1883,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i16.i16( + %a = call @llvm.riscv.vsrl.nxv4i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1898,10 +1898,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vsrl.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv4i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1915,9 +1915,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i16.i16( + %a = call @llvm.riscv.vsrl.nxv8i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1930,10 +1930,10 @@ ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv8i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1947,9 +1947,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i16.i16( + %a = call @llvm.riscv.vsrl.nxv16i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1962,10 +1962,10 @@ ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv16i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -1979,9 +1979,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv32i16.i16( + %a = call @llvm.riscv.vsrl.nxv32i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -1994,10 +1994,10 @@ ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv32i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2011,7 +2011,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i32.i32( + %a = call @llvm.riscv.vsrl.nxv1i32( %0, i32 9, i32 %1) @@ -2026,7 +2026,7 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv1i32( %0, %1, i32 9, @@ -2043,7 +2043,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i32.i32( + %a = call @llvm.riscv.vsrl.nxv2i32( %0, i32 9, i32 %1) @@ -2058,7 +2058,7 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv2i32( %0, %1, i32 9, @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i32.i32( + %a = call @llvm.riscv.vsrl.nxv4i32( %0, i32 9, i32 %1) @@ -2090,7 +2090,7 @@ ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv4i32( %0, %1, i32 9, @@ -2107,7 +2107,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i32.i32( + %a = call @llvm.riscv.vsrl.nxv8i32( %0, i32 9, i32 %1) @@ -2122,7 +2122,7 @@ ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv8i32( %0, %1, i32 9, @@ -2139,7 +2139,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i32.i32( + %a = call @llvm.riscv.vsrl.nxv16i32( %0, i32 9, i32 %1) @@ -2154,7 +2154,7 @@ ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv16i32( %0, %1, i32 9, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll @@ -973,811 +973,811 @@ ret %a } -declare @llvm.riscv.vsrl.nxv1i8.i8( +declare @llvm.riscv.vsrl.nxv1i8( , - i8, + i64, i64); -define @intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsrl_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i8.i8( + %a = call @llvm.riscv.vsrl.nxv1i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i8.i8( +declare @llvm.riscv.vsrl.mask.nxv1i8( , , - i8, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv1i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv2i8.i8( +declare @llvm.riscv.vsrl.nxv2i8( , - i8, + i64, i64); -define @intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsrl_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i8.i8( + %a = call @llvm.riscv.vsrl.nxv2i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i8.i8( +declare @llvm.riscv.vsrl.mask.nxv2i8( , , - i8, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv2i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv4i8.i8( +declare @llvm.riscv.vsrl.nxv4i8( , - i8, + i64, i64); -define @intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vsrl_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i8.i8( + %a = call @llvm.riscv.vsrl.nxv4i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i8.i8( +declare @llvm.riscv.vsrl.mask.nxv4i8( , , - i8, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8: +define 
@intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv4i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv8i8.i8( +declare @llvm.riscv.vsrl.nxv8i8( , - i8, + i64, i64); -define @intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsrl_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i8.i8( + %a = call @llvm.riscv.vsrl.nxv8i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i8.i8( +declare @llvm.riscv.vsrl.mask.nxv8i8( , , - i8, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv8i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv16i8.i8( +declare @llvm.riscv.vsrl.nxv16i8( , - i8, + i64, i64); -define @intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsrl_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i8.i8( + %a = call @llvm.riscv.vsrl.nxv16i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i8.i8( +declare @llvm.riscv.vsrl.mask.nxv16i8( , , - i8, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv16i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv32i8.i8( +declare @llvm.riscv.vsrl.nxv32i8( , - i8, + i64, i64); -define @intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsrl_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv32i8.i8( + %a = call @llvm.riscv.vsrl.nxv32i8( %0, - 
i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i8.i8( +declare @llvm.riscv.vsrl.mask.nxv32i8( , , - i8, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv32i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv64i8.i8( +declare @llvm.riscv.vsrl.nxv64i8( , - i8, + i64, i64); -define @intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsrl_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv64i8.i8( + %a = call @llvm.riscv.vsrl.nxv64i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv64i8.i8( +declare @llvm.riscv.vsrl.mask.nxv64i8( , , - i8, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv64i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv1i16.i16( +declare @llvm.riscv.vsrl.nxv1i16( , - i16, + i64, i64); -define @intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsrl_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i16.i16( + %a = call @llvm.riscv.vsrl.nxv1i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i16.i16( +declare @llvm.riscv.vsrl.mask.nxv1i16( , , - i16, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv1i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv2i16.i16( +declare @llvm.riscv.vsrl.nxv2i16( , - i16, + i64, i64); -define @intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsrl_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i16.i16( + %a = call @llvm.riscv.vsrl.nxv2i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i16.i16( +declare @llvm.riscv.vsrl.mask.nxv2i16( , , - i16, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv2i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv4i16.i16( +declare @llvm.riscv.vsrl.nxv4i16( , - i16, + i64, i64); -define @intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsrl_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i16.i16( + %a = call @llvm.riscv.vsrl.nxv4i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i16.i16( +declare @llvm.riscv.vsrl.mask.nxv4i16( , , - i16, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv4i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv8i16.i16( +declare @llvm.riscv.vsrl.nxv8i16( , - i16, + i64, i64); -define @intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsrl_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i16.i16( + %a = call @llvm.riscv.vsrl.nxv8i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i16.i16( +declare @llvm.riscv.vsrl.mask.nxv8i16( , , - i16, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vsrl.vx v8, 
v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv8i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv16i16.i16( +declare @llvm.riscv.vsrl.nxv16i16( , - i16, + i64, i64); -define @intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsrl_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i16.i16( + %a = call @llvm.riscv.vsrl.nxv16i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i16.i16( +declare @llvm.riscv.vsrl.mask.nxv16i16( , , - i16, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv16i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv32i16.i16( +declare @llvm.riscv.vsrl.nxv32i16( , - i16, + i64, i64); -define @intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsrl_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv32i16.i16( + %a = call @llvm.riscv.vsrl.nxv32i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i16.i16( +declare @llvm.riscv.vsrl.mask.nxv32i16( , , - i16, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv32i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv1i32.i32( +declare @llvm.riscv.vsrl.nxv1i32( , - i32, + i64, i64); -define @intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsrl_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i32.i32( + %a = call @llvm.riscv.vsrl.nxv1i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i32.i32( +declare 
@llvm.riscv.vsrl.mask.nxv1i32( , , - i32, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv1i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv2i32.i32( +declare @llvm.riscv.vsrl.nxv2i32( , - i32, + i64, i64); -define @intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsrl_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i32.i32( + %a = call @llvm.riscv.vsrl.nxv2i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i32.i32( +declare @llvm.riscv.vsrl.mask.nxv2i32( , , - i32, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv2i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv4i32.i32( +declare @llvm.riscv.vsrl.nxv4i32( , - i32, + i64, i64); -define @intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsrl_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i32.i32( + %a = call @llvm.riscv.vsrl.nxv4i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i32.i32( +declare @llvm.riscv.vsrl.mask.nxv4i32( , , - i32, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv4i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv8i32.i32( +declare @llvm.riscv.vsrl.nxv8i32( , - i32, + i64, i64); -define @intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsrl_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) 
nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i32.i32( + %a = call @llvm.riscv.vsrl.nxv8i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i32.i32( +declare @llvm.riscv.vsrl.mask.nxv8i32( , , - i32, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv8i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv16i32.i32( +declare @llvm.riscv.vsrl.nxv16i32( , - i32, + i64, i64); -define @intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsrl_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i32.i32( + %a = call @llvm.riscv.vsrl.nxv16i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i32.i32( +declare @llvm.riscv.vsrl.mask.nxv16i32( , , - i32, + i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv16i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vsrl.nxv1i64.i64( +declare @llvm.riscv.vsrl.nxv1i64( , i64, i64); -define @intrinsic_vsrl_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vsrl_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i64.i64( + %a = call @llvm.riscv.vsrl.nxv1i64( %0, i64 %1, i64 %2) @@ -1785,21 +1785,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i64.i64( +declare @llvm.riscv.vsrl.mask.nxv1i64( , , i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vsrl.mask.nxv1i64.i64( + %a = call @llvm.riscv.vsrl.mask.nxv1i64( %0, %1, i64 %2, @@ -1809,19 +1809,19 @@ ret %a } -declare @llvm.riscv.vsrl.nxv2i64.i64( +declare @llvm.riscv.vsrl.nxv2i64( , i64, i64); -define @intrinsic_vsrl_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vsrl_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i64.i64( + %a = call @llvm.riscv.vsrl.nxv2i64( %0, i64 %1, i64 %2) @@ -1829,21 +1829,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i64.i64( +declare @llvm.riscv.vsrl.mask.nxv2i64( , , i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i64.i64( + %a = call @llvm.riscv.vsrl.mask.nxv2i64( %0, %1, i64 %2, @@ -1853,19 +1853,19 @@ ret %a } -declare @llvm.riscv.vsrl.nxv4i64.i64( +declare @llvm.riscv.vsrl.nxv4i64( , i64, i64); -define @intrinsic_vsrl_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vsrl_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i64.i64( + %a = call @llvm.riscv.vsrl.nxv4i64( %0, i64 %1, i64 %2) @@ -1873,21 +1873,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i64.i64( +declare @llvm.riscv.vsrl.mask.nxv4i64( , , i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i64.i64( + %a = call @llvm.riscv.vsrl.mask.nxv4i64( %0, %1, i64 %2, @@ -1897,19 +1897,19 @@ ret %a } -declare @llvm.riscv.vsrl.nxv8i64.i64( +declare @llvm.riscv.vsrl.nxv8i64( , i64, i64); -define @intrinsic_vsrl_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vsrl_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i64.i64( + %a = call @llvm.riscv.vsrl.nxv8i64( %0, i64 %1, i64 %2) @@ -1917,21 +1917,21 @@ ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i64.i64( +declare @llvm.riscv.vsrl.mask.nxv8i64( , , i64, , i64); -define @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i64.i64( + %a = call @llvm.riscv.vsrl.mask.nxv8i64( %0, %1, i64 %2, @@ -1948,9 +1948,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i8.i8( + %a = call @llvm.riscv.vsrl.nxv1i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1963,10 +1963,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv1i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -1980,9 +1980,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i8.i8( + %a = call @llvm.riscv.vsrl.nxv2i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1995,10 +1995,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv2i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2012,9 +2012,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i8.i8( + %a = call @llvm.riscv.vsrl.nxv4i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2027,10 +2027,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv4i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2044,9 +2044,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i8.i8( + %a = call @llvm.riscv.vsrl.nxv8i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2059,10 +2059,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv8i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2076,9 +2076,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i8.i8( + %a = call @llvm.riscv.vsrl.nxv16i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2091,10 +2091,10 @@ ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv16i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2108,9 +2108,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv32i8.i8( + %a = call @llvm.riscv.vsrl.nxv32i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2123,10 +2123,10 @@ ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv32i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv32i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2140,9 +2140,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv64i8.i8( + %a = call @llvm.riscv.vsrl.nxv64i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2155,10 +2155,10 @@ ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv64i8.i8( + %a = call @llvm.riscv.vsrl.mask.nxv64i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2172,9 +2172,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 
0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i16.i16( + %a = call @llvm.riscv.vsrl.nxv1i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2187,10 +2187,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv1i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2204,9 +2204,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i16.i16( + %a = call @llvm.riscv.vsrl.nxv2i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2219,10 +2219,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv2i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2236,9 +2236,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i16.i16( + %a = call @llvm.riscv.vsrl.nxv4i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2251,10 +2251,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv4i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2268,9 +2268,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i16.i16( + %a = call @llvm.riscv.vsrl.nxv8i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2283,10 +2283,10 @@ ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv8i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2300,9 +2300,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i16.i16( + %a = call @llvm.riscv.vsrl.nxv16i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2315,10 +2315,10 @@ ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv16i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2332,9 +2332,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv32i16.i16( + %a = call @llvm.riscv.vsrl.nxv32i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2347,10 +2347,10 @@ ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv32i16.i16( + %a = call @llvm.riscv.vsrl.mask.nxv32i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2364,9 +2364,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i32.i32( + %a = call @llvm.riscv.vsrl.nxv1i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2379,10 +2379,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv1i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2396,9 +2396,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i32.i32( + %a = call @llvm.riscv.vsrl.nxv2i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2411,10 +2411,10 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv2i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2428,9 +2428,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vsrl.nxv4i32.i32( + %a = call @llvm.riscv.vsrl.nxv4i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2443,10 +2443,10 @@ ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv4i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2460,9 +2460,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i32.i32( + %a = call @llvm.riscv.vsrl.nxv8i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2475,10 +2475,10 @@ ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv8i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2492,9 +2492,9 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv16i32.i32( + %a = call @llvm.riscv.vsrl.nxv16i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2507,10 +2507,10 @@ ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv16i32.i32( + %a = call @llvm.riscv.vsrl.mask.nxv16i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2524,7 +2524,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv1i64.i64( + %a = call @llvm.riscv.vsrl.nxv1i64( %0, i64 9, i64 %1) @@ -2539,7 +2539,7 @@ ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv1i64.i64( + %a = call @llvm.riscv.vsrl.mask.nxv1i64( %0, %1, i64 9, @@ -2556,7 +2556,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv2i64.i64( + %a = call @llvm.riscv.vsrl.nxv2i64( %0, i64 9, i64 %1) @@ -2571,7 +2571,7 @@ ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv2i64.i64( + %a = call @llvm.riscv.vsrl.mask.nxv2i64( %0, %1, i64 9, @@ -2588,7 +2588,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv4i64.i64( + %a = call @llvm.riscv.vsrl.nxv4i64( %0, i64 9, i64 %1) @@ -2603,7 +2603,7 @@ ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv4i64.i64( + %a = call @llvm.riscv.vsrl.mask.nxv4i64( %0, %1, i64 9, @@ -2620,7 +2620,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.nxv8i64.i64( + %a = call @llvm.riscv.vsrl.nxv8i64( %0, i64 9, i64 %1) @@ -2635,7 +2635,7 @@ ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vsrl.mask.nxv8i64.i64( + %a = call @llvm.riscv.vsrl.mask.nxv8i64( %0, %1, i64 9, diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -796,768 +796,591 @@ ret %a } -declare @llvm.riscv.vssra.nxv1i64.nxv1i64( - , - , - i32); - -define @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu -; CHECK-NEXT: vssra.vv v8, v8, v9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.nxv1i64.nxv1i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv1i64.nxv1i64( - , - , - , - , - i32); - -define 
@intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu -; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vssra.nxv2i64.nxv2i64( - , - , - i32); - -define @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu -; CHECK-NEXT: vssra.vv v8, v8, v10 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.nxv2i64.nxv2i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv2i64.nxv2i64( - , - , - , - , - i32); - -define @intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu -; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vssra.nxv4i64.nxv4i64( - , - , - i32); - -define @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu -; CHECK-NEXT: vssra.vv v8, v8, v12 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.nxv4i64.nxv4i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv4i64.nxv4i64( - , - , - , - , - i32); - -define @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu -; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vssra.nxv8i64.nxv8i64( - , - , - i32); - -define @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu -; CHECK-NEXT: vssra.vv v8, v8, v16 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.nxv8i64.nxv8i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssra.mask.nxv8i64.nxv8i64( - , - , - , - , - i32); - -define @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu -; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vssra.nxv1i8.i8( +declare @llvm.riscv.vssra.nxv1i8( , - i8, + i32, i32); -define @intrinsic_vssra_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vssra_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vssra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i8.i8( + %a = call @llvm.riscv.vssra.nxv1i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv1i8.i8( +declare @llvm.riscv.vssra.mask.nxv1i8( , , - i8, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv1i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv2i8.i8( +declare @llvm.riscv.vssra.nxv2i8( , - i8, + i32, i32); -define @intrinsic_vssra_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vssra_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i8.i8( + %a = call @llvm.riscv.vssra.nxv2i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv2i8.i8( +declare @llvm.riscv.vssra.mask.nxv2i8( , , - i8, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv2i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv4i8.i8( +declare @llvm.riscv.vssra.nxv4i8( , - i8, + i32, i32); -define @intrinsic_vssra_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vssra_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i8.i8( + %a = call @llvm.riscv.vssra.nxv4i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv4i8.i8( +declare @llvm.riscv.vssra.mask.nxv4i8( , , - i8, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv4i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a 
} -declare @llvm.riscv.vssra.nxv8i8.i8( +declare @llvm.riscv.vssra.nxv8i8( , - i8, + i32, i32); -define @intrinsic_vssra_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vssra_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i8.i8( + %a = call @llvm.riscv.vssra.nxv8i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv8i8.i8( +declare @llvm.riscv.vssra.mask.nxv8i8( , , - i8, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv8i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv16i8.i8( +declare @llvm.riscv.vssra.nxv16i8( , - i8, + i32, i32); -define @intrinsic_vssra_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vssra_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i8.i8( + %a = call @llvm.riscv.vssra.nxv16i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv16i8.i8( +declare @llvm.riscv.vssra.mask.nxv16i8( , , - i8, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv16i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv32i8.i8( +declare @llvm.riscv.vssra.nxv32i8( , - i8, + i32, i32); -define @intrinsic_vssra_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vssra_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv32i8.i8( + %a = call @llvm.riscv.vssra.nxv32i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv32i8.i8( +declare @llvm.riscv.vssra.mask.nxv32i8( , , - i8, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, 
i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv32i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv32i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv64i8.i8( +declare @llvm.riscv.vssra.nxv64i8( , - i8, + i32, i32); -define @intrinsic_vssra_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vssra_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv64i8.i8( + %a = call @llvm.riscv.vssra.nxv64i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv64i8.i8( +declare @llvm.riscv.vssra.mask.nxv64i8( , , - i8, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv64i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv1i16.i16( +declare @llvm.riscv.vssra.nxv1i16( , - i16, + i32, i32); -define @intrinsic_vssra_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vssra_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i16.i16( + %a = call @llvm.riscv.vssra.nxv1i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv1i16.i16( +declare @llvm.riscv.vssra.mask.nxv1i16( , , - i16, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv1i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv2i16.i16( +declare @llvm.riscv.vssra.nxv2i16( , - i16, + i32, i32); -define @intrinsic_vssra_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vssra_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i16.i16( 
+ %a = call @llvm.riscv.vssra.nxv2i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv2i16.i16( +declare @llvm.riscv.vssra.mask.nxv2i16( , , - i16, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv2i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv4i16.i16( +declare @llvm.riscv.vssra.nxv4i16( , - i16, + i32, i32); -define @intrinsic_vssra_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vssra_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i16.i16( + %a = call @llvm.riscv.vssra.nxv4i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv4i16.i16( +declare @llvm.riscv.vssra.mask.nxv4i16( , , - i16, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv4i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv8i16.i16( +declare @llvm.riscv.vssra.nxv8i16( , - i16, + i32, i32); -define @intrinsic_vssra_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vssra_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i16.i16( + %a = call @llvm.riscv.vssra.nxv8i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv8i16.i16( +declare @llvm.riscv.vssra.mask.nxv8i16( , , - i16, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv8i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv16i16.i16( +declare @llvm.riscv.vssra.nxv16i16( , - i16, + i32, i32); -define 
@intrinsic_vssra_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vssra_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i16.i16( + %a = call @llvm.riscv.vssra.nxv16i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv16i16.i16( +declare @llvm.riscv.vssra.mask.nxv16i16( , , - i16, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv16i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv32i16.i16( +declare @llvm.riscv.vssra.nxv32i16( , - i16, + i32, i32); -define @intrinsic_vssra_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vssra_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv32i16.i16( + %a = call @llvm.riscv.vssra.nxv32i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv32i16.i16( +declare @llvm.riscv.vssra.mask.nxv32i16( , , - i16, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv32i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv1i32.i32( +declare @llvm.riscv.vssra.nxv1i32( , i32, i32); -define @intrinsic_vssra_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vssra_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i32.i32( + %a = call @llvm.riscv.vssra.nxv1i32( %0, i32 %1, i32 %2) @@ -1565,21 +1388,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv1i32.i32( +declare @llvm.riscv.vssra.mask.nxv1i32( , , i32, , i32); -define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, 
%3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv1i32( %0, %1, i32 %2, @@ -1589,19 +1412,19 @@ ret %a } -declare @llvm.riscv.vssra.nxv2i32.i32( +declare @llvm.riscv.vssra.nxv2i32( , i32, i32); -define @intrinsic_vssra_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vssra_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i32.i32( + %a = call @llvm.riscv.vssra.nxv2i32( %0, i32 %1, i32 %2) @@ -1609,21 +1432,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv2i32.i32( +declare @llvm.riscv.vssra.mask.nxv2i32( , , i32, , i32); -define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv2i32( %0, %1, i32 %2, @@ -1633,19 +1456,19 @@ ret %a } -declare @llvm.riscv.vssra.nxv4i32.i32( +declare @llvm.riscv.vssra.nxv4i32( , i32, i32); -define @intrinsic_vssra_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vssra_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i32.i32( + %a = call @llvm.riscv.vssra.nxv4i32( %0, i32 %1, i32 %2) @@ -1653,21 +1476,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv4i32.i32( +declare @llvm.riscv.vssra.mask.nxv4i32( , , i32, , i32); -define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv4i32( %0, %1, i32 %2, @@ -1677,19 +1500,19 @@ ret %a } -declare @llvm.riscv.vssra.nxv8i32.i32( +declare @llvm.riscv.vssra.nxv8i32( , i32, i32); -define @intrinsic_vssra_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vssra_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i32.i32( + 
%a = call @llvm.riscv.vssra.nxv8i32( %0, i32 %1, i32 %2) @@ -1697,21 +1520,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv8i32.i32( +declare @llvm.riscv.vssra.mask.nxv8i32( , , i32, , i32); -define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv8i32( %0, %1, i32 %2, @@ -1721,19 +1544,19 @@ ret %a } -declare @llvm.riscv.vssra.nxv16i32.i32( +declare @llvm.riscv.vssra.nxv16i32( , i32, i32); -define @intrinsic_vssra_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vssra_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i32.i32( + %a = call @llvm.riscv.vssra.nxv16i32( %0, i32 %1, i32 %2) @@ -1741,21 +1564,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv16i32.i32( +declare @llvm.riscv.vssra.mask.nxv16i32( , , i32, , i32); -define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv16i32( %0, %1, i32 %2, @@ -1765,246 +1588,176 @@ ret %a } -declare @llvm.riscv.vssra.nxv1i64.i64( +declare @llvm.riscv.vssra.nxv1i64( , - i64, + i32, i32); -define @intrinsic_vssra_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vssra_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu -; CHECK-NEXT: vmv.v.x v25, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v25, v25, a1 -; CHECK-NEXT: vmv.v.x v26, a0 -; CHECK-NEXT: vsll.vx v26, v26, a1 -; CHECK-NEXT: vsrl.vx v26, v26, a1 -; CHECK-NEXT: vor.vv v25, v26, v25 -; CHECK-NEXT: vssra.vv v8, v8, v25 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i64.i64( + %a = call @llvm.riscv.vssra.nxv1i64( %0, - i64 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv1i64.i64( +declare @llvm.riscv.vssra.mask.nxv1i64( , , - i64, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, 
e64,m1,ta,mu -; CHECK-NEXT: vmv.v.x v25, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v25, v25, a1 -; CHECK-NEXT: vmv.v.x v26, a0 -; CHECK-NEXT: vsll.vx v26, v26, a1 -; CHECK-NEXT: vsrl.vx v26, v26, a1 -; CHECK-NEXT: vor.vv v25, v26, v25 -; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu -; CHECK-NEXT: vssra.vv v8, v9, v25, v0.t +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv1i64( %0, %1, - i64 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv2i64.i64( +declare @llvm.riscv.vssra.nxv2i64( , - i64, + i32, i32); -define @intrinsic_vssra_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vssra_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu -; CHECK-NEXT: vmv.v.x v26, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v26, v26, a1 -; CHECK-NEXT: vmv.v.x v28, a0 -; CHECK-NEXT: vsll.vx v28, v28, a1 -; CHECK-NEXT: vsrl.vx v28, v28, a1 -; CHECK-NEXT: vor.vv v26, v28, v26 -; CHECK-NEXT: vssra.vv v8, v8, v26 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i64.i64( + %a = call @llvm.riscv.vssra.nxv2i64( %0, - i64 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv2i64.i64( +declare @llvm.riscv.vssra.mask.nxv2i64( , , - i64, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu -; CHECK-NEXT: vmv.v.x v26, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v26, v26, a1 -; CHECK-NEXT: vmv.v.x v28, a0 -; CHECK-NEXT: vsll.vx v28, v28, a1 -; CHECK-NEXT: vsrl.vx v28, v28, a1 -; CHECK-NEXT: vor.vv v26, v28, v26 -; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu -; CHECK-NEXT: vssra.vv v8, v10, v26, v0.t +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv2i64( %0, %1, - i64 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv4i64.i64( +declare @llvm.riscv.vssra.nxv4i64( , - i64, + i32, i32); -define @intrinsic_vssra_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vssra_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu -; CHECK-NEXT: vmv.v.x v28, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v28, v28, a1 -; CHECK-NEXT: vmv.v.x v12, a0 -; CHECK-NEXT: vsll.vx v12, v12, a1 -; CHECK-NEXT: vsrl.vx v12, v12, a1 -; CHECK-NEXT: vor.vv v28, v12, v28 -; CHECK-NEXT: vssra.vv v8, v8, v28 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i64.i64( + %a = call @llvm.riscv.vssra.nxv4i64( %0, - i64 %1, + i32 %1, 
i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv4i64.i64( +declare @llvm.riscv.vssra.mask.nxv4i64( , , - i64, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu -; CHECK-NEXT: vmv.v.x v28, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v28, v28, a1 -; CHECK-NEXT: vmv.v.x v16, a0 -; CHECK-NEXT: vsll.vx v16, v16, a1 -; CHECK-NEXT: vsrl.vx v16, v16, a1 -; CHECK-NEXT: vor.vv v28, v16, v28 -; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu -; CHECK-NEXT: vssra.vv v8, v12, v28, v0.t +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv4i64( %0, %1, - i64 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssra.nxv8i64.i64( +declare @llvm.riscv.vssra.nxv8i64( , - i64, + i32, i32); -define @intrinsic_vssra_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vssra_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.x v16, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v16, v16, a1 -; CHECK-NEXT: vmv.v.x v24, a0 -; CHECK-NEXT: vsll.vx v24, v24, a1 -; CHECK-NEXT: vsrl.vx v24, v24, a1 -; CHECK-NEXT: vor.vv v16, v24, v16 -; CHECK-NEXT: vssra.vv v8, v8, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i64.i64( + %a = call @llvm.riscv.vssra.nxv8i64( %0, - i64 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv8i64.i64( +declare @llvm.riscv.vssra.mask.nxv8i64( , , - i64, + i32, , i32); -define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: csrrs a3, vlenb, zero -; CHECK-NEXT: sub sp, sp, a3 -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.x v24, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v0, v24, a1 -; CHECK-NEXT: vmv.v.x v24, a0 -; CHECK-NEXT: vsll.vx v24, v24, a1 -; CHECK-NEXT: vsrl.vx v24, v24, a1 -; CHECK-NEXT: vor.vv v24, v24, v0 -; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t -; CHECK-NEXT: csrrs a0, vlenb, zero -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: addi sp, sp, 16 +define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu +; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv8i64( %0, %1, - i64 %2, + i32 %2, %3, i32 %4) @@ -2018,9 +1771,9 @@ ; CHECK-NEXT: 
vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i8.i8( + %a = call @llvm.riscv.vssra.nxv1i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2033,10 +1786,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv1i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2050,9 +1803,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i8.i8( + %a = call @llvm.riscv.vssra.nxv2i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2065,10 +1818,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv2i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2082,9 +1835,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i8.i8( + %a = call @llvm.riscv.vssra.nxv4i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2097,10 +1850,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv4i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2114,9 +1867,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i8.i8( + %a = call @llvm.riscv.vssra.nxv8i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2129,10 +1882,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv8i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2146,9 +1899,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i8.i8( + %a = call @llvm.riscv.vssra.nxv16i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2161,10 +1914,10 @@ ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv16i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2178,9 +1931,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv32i8.i8( + %a = call @llvm.riscv.vssra.nxv32i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2193,10 +1946,10 @@ ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv32i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv32i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2210,9 +1963,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv64i8.i8( + %a = call @llvm.riscv.vssra.nxv64i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2225,10 +1978,10 @@ ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv64i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2242,9 +1995,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i16.i16( + %a = call @llvm.riscv.vssra.nxv1i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2257,10 +2010,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv1i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2274,9 +2027,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) 
entry: - %a = call @llvm.riscv.vssra.nxv2i16.i16( + %a = call @llvm.riscv.vssra.nxv2i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2289,10 +2042,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv2i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2306,9 +2059,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i16.i16( + %a = call @llvm.riscv.vssra.nxv4i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2321,10 +2074,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv4i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2338,9 +2091,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i16.i16( + %a = call @llvm.riscv.vssra.nxv8i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2353,10 +2106,10 @@ ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv8i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2370,9 +2123,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i16.i16( + %a = call @llvm.riscv.vssra.nxv16i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2385,10 +2138,10 @@ ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv16i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2402,9 +2155,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv32i16.i16( + %a = call @llvm.riscv.vssra.nxv32i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2417,10 +2170,10 @@ ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv32i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2434,7 +2187,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i32.i32( + %a = call @llvm.riscv.vssra.nxv1i32( %0, i32 9, i32 %1) @@ -2449,7 +2202,7 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv1i32( %0, %1, i32 9, @@ -2466,7 +2219,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i32.i32( + %a = call @llvm.riscv.vssra.nxv2i32( %0, i32 9, i32 %1) @@ -2481,7 +2234,7 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv2i32( %0, %1, i32 9, @@ -2498,7 +2251,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i32.i32( + %a = call @llvm.riscv.vssra.nxv4i32( %0, i32 9, i32 %1) @@ -2513,7 +2266,7 @@ ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv4i32( %0, %1, i32 9, @@ -2530,7 +2283,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i32.i32( + %a = call @llvm.riscv.vssra.nxv8i32( %0, i32 9, i32 %1) @@ -2545,7 +2298,7 
@@ ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv8i32( %0, %1, i32 9, @@ -2562,7 +2315,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i32.i32( + %a = call @llvm.riscv.vssra.nxv16i32( %0, i32 9, i32 %1) @@ -2577,7 +2330,7 @@ ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv16i32( %0, %1, i32 9, @@ -2586,131 +2339,3 @@ ret %a } - -define @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.nxv1i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu -; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.nxv2i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu -; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.nxv4i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu -; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssra_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu -; CHECK-NEXT: vssra.vi v8, v8, 9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssra.nxv8i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu -; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) 
-entry: - %a = call @llvm.riscv.vssra.mask.nxv8i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -973,811 +973,811 @@ ret %a } -declare @llvm.riscv.vssra.nxv1i8.i8( +declare @llvm.riscv.vssra.nxv1i8( , - i8, + i64, i64); -define @intrinsic_vssra_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vssra_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i8.i8( + %a = call @llvm.riscv.vssra.nxv1i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv1i8.i8( +declare @llvm.riscv.vssra.mask.nxv1i8( , , - i8, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv1i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv2i8.i8( +declare @llvm.riscv.vssra.nxv2i8( , - i8, + i64, i64); -define @intrinsic_vssra_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vssra_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i8.i8( + %a = call @llvm.riscv.vssra.nxv2i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv2i8.i8( +declare @llvm.riscv.vssra.mask.nxv2i8( , , - i8, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv2i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv4i8.i8( +declare @llvm.riscv.vssra.nxv4i8( , - i8, + i64, i64); -define @intrinsic_vssra_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vssra_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i8.i8( + %a = call @llvm.riscv.vssra.nxv4i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare 
@llvm.riscv.vssra.mask.nxv4i8.i8( +declare @llvm.riscv.vssra.mask.nxv4i8( , , - i8, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv4i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv8i8.i8( +declare @llvm.riscv.vssra.nxv8i8( , - i8, + i64, i64); -define @intrinsic_vssra_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vssra_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i8.i8( + %a = call @llvm.riscv.vssra.nxv8i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv8i8.i8( +declare @llvm.riscv.vssra.mask.nxv8i8( , , - i8, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv8i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv16i8.i8( +declare @llvm.riscv.vssra.nxv16i8( , - i8, + i64, i64); -define @intrinsic_vssra_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vssra_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i8.i8( + %a = call @llvm.riscv.vssra.nxv16i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv16i8.i8( +declare @llvm.riscv.vssra.mask.nxv16i8( , , - i8, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv16i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv32i8.i8( +declare @llvm.riscv.vssra.nxv32i8( , - i8, + i64, i64); -define @intrinsic_vssra_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vssra_vx_nxv32i8_nxv32i8( %0, 
i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv32i8.i8( + %a = call @llvm.riscv.vssra.nxv32i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv32i8.i8( +declare @llvm.riscv.vssra.mask.nxv32i8( , , - i8, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv32i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv32i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv64i8.i8( +declare @llvm.riscv.vssra.nxv64i8( , - i8, + i64, i64); -define @intrinsic_vssra_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vssra_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv64i8.i8( + %a = call @llvm.riscv.vssra.nxv64i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv64i8.i8( +declare @llvm.riscv.vssra.mask.nxv64i8( , , - i8, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv64i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv1i16.i16( +declare @llvm.riscv.vssra.nxv1i16( , - i16, + i64, i64); -define @intrinsic_vssra_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vssra_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i16.i16( + %a = call @llvm.riscv.vssra.nxv1i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv1i16.i16( +declare @llvm.riscv.vssra.mask.nxv1i16( , , - i16, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vssra.mask.nxv1i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv1i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv2i16.i16( +declare @llvm.riscv.vssra.nxv2i16( , - i16, + i64, i64); -define @intrinsic_vssra_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vssra_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i16.i16( + %a = call @llvm.riscv.vssra.nxv2i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv2i16.i16( +declare @llvm.riscv.vssra.mask.nxv2i16( , , - i16, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv2i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv4i16.i16( +declare @llvm.riscv.vssra.nxv4i16( , - i16, + i64, i64); -define @intrinsic_vssra_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vssra_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i16.i16( + %a = call @llvm.riscv.vssra.nxv4i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv4i16.i16( +declare @llvm.riscv.vssra.mask.nxv4i16( , , - i16, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv4i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv8i16.i16( +declare @llvm.riscv.vssra.nxv8i16( , - i16, + i64, i64); -define @intrinsic_vssra_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vssra_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i16.i16( + %a = call @llvm.riscv.vssra.nxv8i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv8i16.i16( +declare @llvm.riscv.vssra.mask.nxv8i16( , , - i16, + i64, , i64); -define 
@intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv8i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv16i16.i16( +declare @llvm.riscv.vssra.nxv16i16( , - i16, + i64, i64); -define @intrinsic_vssra_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vssra_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i16.i16( + %a = call @llvm.riscv.vssra.nxv16i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv16i16.i16( +declare @llvm.riscv.vssra.mask.nxv16i16( , , - i16, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv16i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv32i16.i16( +declare @llvm.riscv.vssra.nxv32i16( , - i16, + i64, i64); -define @intrinsic_vssra_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vssra_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv32i16.i16( + %a = call @llvm.riscv.vssra.nxv32i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv32i16.i16( +declare @llvm.riscv.vssra.mask.nxv32i16( , , - i16, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv32i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv1i32.i32( +declare @llvm.riscv.vssra.nxv1i32( , - i32, + i64, i64); -define @intrinsic_vssra_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32_i32: +define 
@intrinsic_vssra_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i32.i32( + %a = call @llvm.riscv.vssra.nxv1i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv1i32.i32( +declare @llvm.riscv.vssra.mask.nxv1i32( , , - i32, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv1i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv2i32.i32( +declare @llvm.riscv.vssra.nxv2i32( , - i32, + i64, i64); -define @intrinsic_vssra_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vssra_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i32.i32( + %a = call @llvm.riscv.vssra.nxv2i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv2i32.i32( +declare @llvm.riscv.vssra.mask.nxv2i32( , , - i32, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv2i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv4i32.i32( +declare @llvm.riscv.vssra.nxv4i32( , - i32, + i64, i64); -define @intrinsic_vssra_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vssra_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i32.i32( + %a = call @llvm.riscv.vssra.nxv4i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv4i32.i32( +declare @llvm.riscv.vssra.mask.nxv4i32( , , - i32, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vssra.vx v8, 
v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv4i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv8i32.i32( +declare @llvm.riscv.vssra.nxv8i32( , - i32, + i64, i64); -define @intrinsic_vssra_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vssra_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i32.i32( + %a = call @llvm.riscv.vssra.nxv8i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv8i32.i32( +declare @llvm.riscv.vssra.mask.nxv8i32( , , - i32, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv8i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv16i32.i32( +declare @llvm.riscv.vssra.nxv16i32( , - i32, + i64, i64); -define @intrinsic_vssra_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vssra_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i32.i32( + %a = call @llvm.riscv.vssra.nxv16i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssra.mask.nxv16i32.i32( +declare @llvm.riscv.vssra.mask.nxv16i32( , , - i32, + i64, , i64); -define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv16i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssra.nxv1i64.i64( +declare @llvm.riscv.vssra.nxv1i64( , i64, i64); -define @intrinsic_vssra_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vssra_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i64.i64( + %a = call @llvm.riscv.vssra.nxv1i64( %0, i64 %1, i64 %2) @@ -1785,21 +1785,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv1i64.i64( 
+declare @llvm.riscv.vssra.mask.nxv1i64( , , i64, , i64); -define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv1i64( %0, %1, i64 %2, @@ -1809,19 +1809,19 @@ ret %a } -declare @llvm.riscv.vssra.nxv2i64.i64( +declare @llvm.riscv.vssra.nxv2i64( , i64, i64); -define @intrinsic_vssra_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vssra_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i64.i64( + %a = call @llvm.riscv.vssra.nxv2i64( %0, i64 %1, i64 %2) @@ -1829,21 +1829,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv2i64.i64( +declare @llvm.riscv.vssra.mask.nxv2i64( , , i64, , i64); -define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv2i64( %0, %1, i64 %2, @@ -1853,19 +1853,19 @@ ret %a } -declare @llvm.riscv.vssra.nxv4i64.i64( +declare @llvm.riscv.vssra.nxv4i64( , i64, i64); -define @intrinsic_vssra_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vssra_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i64.i64( + %a = call @llvm.riscv.vssra.nxv4i64( %0, i64 %1, i64 %2) @@ -1873,21 +1873,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv4i64.i64( +declare @llvm.riscv.vssra.mask.nxv4i64( , , i64, , i64); -define @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv4i64( %0, %1, i64 %2, @@ -1897,19 +1897,19 @@ ret %a } -declare @llvm.riscv.vssra.nxv8i64.i64( +declare @llvm.riscv.vssra.nxv8i64( , i64, i64); -define @intrinsic_vssra_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64_i64: +define 
@intrinsic_vssra_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i64.i64( + %a = call @llvm.riscv.vssra.nxv8i64( %0, i64 %1, i64 %2) @@ -1917,21 +1917,21 @@ ret %a } -declare @llvm.riscv.vssra.mask.nxv8i64.i64( +declare @llvm.riscv.vssra.mask.nxv8i64( , , i64, , i64); -define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv8i64( %0, %1, i64 %2, @@ -1948,9 +1948,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i8.i8( + %a = call @llvm.riscv.vssra.nxv1i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1963,10 +1963,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv1i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -1980,9 +1980,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i8.i8( + %a = call @llvm.riscv.vssra.nxv2i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1995,10 +1995,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv2i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2012,9 +2012,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i8.i8( + %a = call @llvm.riscv.vssra.nxv4i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2027,10 +2027,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv4i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2044,9 +2044,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i8.i8( + %a = call @llvm.riscv.vssra.nxv8i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2059,10 +2059,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv8i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2076,9 +2076,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i8.i8( + %a = call @llvm.riscv.vssra.nxv16i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2091,10 +2091,10 @@ ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv16i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2108,9 +2108,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv32i8.i8( + %a = call @llvm.riscv.vssra.nxv32i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2123,10 +2123,10 @@ ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a 
= call @llvm.riscv.vssra.mask.nxv32i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv32i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2140,9 +2140,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv64i8.i8( + %a = call @llvm.riscv.vssra.nxv64i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2155,10 +2155,10 @@ ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv64i8.i8( + %a = call @llvm.riscv.vssra.mask.nxv64i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2172,9 +2172,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i16.i16( + %a = call @llvm.riscv.vssra.nxv1i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2187,10 +2187,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv1i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2204,9 +2204,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i16.i16( + %a = call @llvm.riscv.vssra.nxv2i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2219,10 +2219,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv2i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2236,9 +2236,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i16.i16( + %a = call @llvm.riscv.vssra.nxv4i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2251,10 +2251,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv4i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2268,9 +2268,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i16.i16( + %a = call @llvm.riscv.vssra.nxv8i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2283,10 +2283,10 @@ ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv8i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2300,9 +2300,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i16.i16( + %a = call @llvm.riscv.vssra.nxv16i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2315,10 +2315,10 @@ ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv16i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2332,9 +2332,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv32i16.i16( + %a = call @llvm.riscv.vssra.nxv32i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2347,10 +2347,10 @@ ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv32i16.i16( + %a = call @llvm.riscv.vssra.mask.nxv32i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2364,9 +2364,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i32.i32( + %a = call @llvm.riscv.vssra.nxv1i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2379,10 +2379,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = 
call @llvm.riscv.vssra.mask.nxv1i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv1i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2396,9 +2396,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i32.i32( + %a = call @llvm.riscv.vssra.nxv2i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2411,10 +2411,10 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv2i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2428,9 +2428,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i32.i32( + %a = call @llvm.riscv.vssra.nxv4i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2443,10 +2443,10 @@ ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv4i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2460,9 +2460,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i32.i32( + %a = call @llvm.riscv.vssra.nxv8i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2475,10 +2475,10 @@ ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv8i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2492,9 +2492,9 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv16i32.i32( + %a = call @llvm.riscv.vssra.nxv16i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2507,10 +2507,10 @@ ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv16i32.i32( + %a = call @llvm.riscv.vssra.mask.nxv16i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2524,7 +2524,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv1i64.i64( + %a = call @llvm.riscv.vssra.nxv1i64( %0, i64 9, i64 %1) @@ -2539,7 +2539,7 @@ ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv1i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv1i64( %0, %1, i64 9, @@ -2556,7 +2556,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv2i64.i64( + %a = call @llvm.riscv.vssra.nxv2i64( %0, i64 9, i64 %1) @@ -2571,7 +2571,7 @@ ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv2i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv2i64( %0, %1, i64 9, @@ -2588,7 +2588,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv4i64.i64( + %a = call @llvm.riscv.vssra.nxv4i64( %0, i64 9, i64 %1) @@ -2603,7 +2603,7 @@ ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv4i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv4i64( %0, %1, i64 9, @@ -2620,7 +2620,7 @@ ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.nxv8i64.i64( + %a = call @llvm.riscv.vssra.nxv8i64( %0, i64 9, i64 %1) @@ -2635,7 +2635,7 @@ ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssra.mask.nxv8i64.i64( + %a = call @llvm.riscv.vssra.mask.nxv8i64( %0, %1, i64 9, diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -796,768 +796,591 @@ ret %a } -declare @llvm.riscv.vssrl.nxv1i64.nxv1i64( - , - , - i32); - -define @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu -; CHECK-NEXT: vssrl.vv v8, v8, v9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.nxv1i64.nxv1i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64( - , - , - , - , - i32); - -define @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu -; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vssrl.nxv2i64.nxv2i64( - , - , - i32); - -define @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu -; CHECK-NEXT: vssrl.vv v8, v8, v10 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.nxv2i64.nxv2i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64( - , - , - , - , - i32); - -define @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu -; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vssrl.nxv4i64.nxv4i64( - , - , - i32); - -define @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu -; CHECK-NEXT: vssrl.vv v8, v8, v12 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.nxv4i64.nxv4i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64( - , - , - , - , - i32); - -define @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu -; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vssrl.nxv8i64.nxv8i64( - , - , - i32); - -define @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu -; CHECK-NEXT: vssrl.vv v8, v8, v16 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.nxv8i64.nxv8i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64( - , - , - , - , - i32); - -define 
@intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vssrl.nxv1i8.i8( +declare @llvm.riscv.vssrl.nxv1i8( , - i8, + i32, i32); -define @intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vssrl_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i8.i8( + %a = call @llvm.riscv.vssrl.nxv1i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.i8( +declare @llvm.riscv.vssrl.mask.nxv1i8( , , - i8, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv1i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv2i8.i8( +declare @llvm.riscv.vssrl.nxv2i8( , - i8, + i32, i32); -define @intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vssrl_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i8.i8( + %a = call @llvm.riscv.vssrl.nxv2i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv2i8.i8( +declare @llvm.riscv.vssrl.mask.nxv2i8( , , - i8, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv2i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv4i8.i8( +declare @llvm.riscv.vssrl.nxv4i8( , - i8, + i32, i32); -define @intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vssrl_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i8.i8( + 
%a = call @llvm.riscv.vssrl.nxv4i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv4i8.i8( +declare @llvm.riscv.vssrl.mask.nxv4i8( , , - i8, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv4i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv8i8.i8( +declare @llvm.riscv.vssrl.nxv8i8( , - i8, + i32, i32); -define @intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vssrl_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i8.i8( + %a = call @llvm.riscv.vssrl.nxv8i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv8i8.i8( +declare @llvm.riscv.vssrl.mask.nxv8i8( , , - i8, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv8i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv16i8.i8( +declare @llvm.riscv.vssrl.nxv16i8( , - i8, + i32, i32); -define @intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vssrl_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i8.i8( + %a = call @llvm.riscv.vssrl.nxv16i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv16i8.i8( +declare @llvm.riscv.vssrl.mask.nxv16i8( , , - i8, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv16i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv32i8.i8( +declare @llvm.riscv.vssrl.nxv32i8( , - i8, + i32, i32); -define @intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vssrl_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv32i8.i8( + %a = call @llvm.riscv.vssrl.nxv32i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv32i8.i8( +declare @llvm.riscv.vssrl.mask.nxv32i8( , , - i8, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv32i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv64i8.i8( +declare @llvm.riscv.vssrl.nxv64i8( , - i8, + i32, i32); -define @intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vssrl_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv64i8.i8( + %a = call @llvm.riscv.vssrl.nxv64i8( %0, - i8 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv64i8.i8( +declare @llvm.riscv.vssrl.mask.nxv64i8( , , - i8, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv64i8( %0, %1, - i8 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv1i16.i16( +declare @llvm.riscv.vssrl.nxv1i16( , - i16, + i32, i32); -define @intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vssrl_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i16.i16( + %a = call @llvm.riscv.vssrl.nxv1i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i16.i16( +declare @llvm.riscv.vssrl.mask.nxv1i16( , , - i16, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; 
CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv1i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv2i16.i16( +declare @llvm.riscv.vssrl.nxv2i16( , - i16, + i32, i32); -define @intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vssrl_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i16.i16( + %a = call @llvm.riscv.vssrl.nxv2i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv2i16.i16( +declare @llvm.riscv.vssrl.mask.nxv2i16( , , - i16, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv2i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv4i16.i16( +declare @llvm.riscv.vssrl.nxv4i16( , - i16, + i32, i32); -define @intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vssrl_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i16.i16( + %a = call @llvm.riscv.vssrl.nxv4i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv4i16.i16( +declare @llvm.riscv.vssrl.mask.nxv4i16( , , - i16, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv4i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv8i16.i16( +declare @llvm.riscv.vssrl.nxv8i16( , - i16, + i32, i32); -define @intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vssrl_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i16.i16( + %a = call @llvm.riscv.vssrl.nxv8i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv8i16.i16( 
+declare @llvm.riscv.vssrl.mask.nxv8i16( , , - i16, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv8i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv16i16.i16( +declare @llvm.riscv.vssrl.nxv16i16( , - i16, + i32, i32); -define @intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vssrl_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i16.i16( + %a = call @llvm.riscv.vssrl.nxv16i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv16i16.i16( +declare @llvm.riscv.vssrl.mask.nxv16i16( , , - i16, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv16i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv32i16.i16( +declare @llvm.riscv.vssrl.nxv32i16( , - i16, + i32, i32); -define @intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vssrl_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv32i16.i16( + %a = call @llvm.riscv.vssrl.nxv32i16( %0, - i16 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv32i16.i16( +declare @llvm.riscv.vssrl.mask.nxv32i16( , , - i16, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv32i16( %0, %1, - i16 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv1i32.i32( +declare @llvm.riscv.vssrl.nxv1i32( , i32, i32); -define @intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vssrl_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i32.i32( + %a = call @llvm.riscv.vssrl.nxv1i32( %0, i32 %1, i32 %2) @@ -1565,21 +1388,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i32.i32( +declare @llvm.riscv.vssrl.mask.nxv1i32( , , i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv1i32( %0, %1, i32 %2, @@ -1589,19 +1412,19 @@ ret %a } -declare @llvm.riscv.vssrl.nxv2i32.i32( +declare @llvm.riscv.vssrl.nxv2i32( , i32, i32); -define @intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vssrl_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i32.i32( + %a = call @llvm.riscv.vssrl.nxv2i32( %0, i32 %1, i32 %2) @@ -1609,21 +1432,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv2i32.i32( +declare @llvm.riscv.vssrl.mask.nxv2i32( , , i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv2i32( %0, %1, i32 %2, @@ -1633,19 +1456,19 @@ ret %a } -declare @llvm.riscv.vssrl.nxv4i32.i32( +declare @llvm.riscv.vssrl.nxv4i32( , i32, i32); -define @intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vssrl_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i32.i32( + %a = call @llvm.riscv.vssrl.nxv4i32( %0, i32 %1, i32 %2) @@ -1653,21 +1476,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv4i32.i32( +declare @llvm.riscv.vssrl.mask.nxv4i32( , , i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, 
e32,m2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv4i32( %0, %1, i32 %2, @@ -1677,19 +1500,19 @@ ret %a } -declare @llvm.riscv.vssrl.nxv8i32.i32( +declare @llvm.riscv.vssrl.nxv8i32( , i32, i32); -define @intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vssrl_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i32.i32( + %a = call @llvm.riscv.vssrl.nxv8i32( %0, i32 %1, i32 %2) @@ -1697,21 +1520,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv8i32.i32( +declare @llvm.riscv.vssrl.mask.nxv8i32( , , i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv8i32( %0, %1, i32 %2, @@ -1721,19 +1544,19 @@ ret %a } -declare @llvm.riscv.vssrl.nxv16i32.i32( +declare @llvm.riscv.vssrl.nxv16i32( , i32, i32); -define @intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vssrl_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i32.i32( + %a = call @llvm.riscv.vssrl.nxv16i32( %0, i32 %1, i32 %2) @@ -1741,21 +1564,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv16i32.i32( +declare @llvm.riscv.vssrl.mask.nxv16i32( , , i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv16i32( %0, %1, i32 %2, @@ -1765,246 +1588,176 @@ ret %a } -declare @llvm.riscv.vssrl.nxv1i64.i64( +declare @llvm.riscv.vssrl.nxv1i64( , - i64, + i32, i32); -define @intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vssrl_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu -; CHECK-NEXT: vmv.v.x v25, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v25, v25, a1 -; CHECK-NEXT: vmv.v.x v26, a0 -; CHECK-NEXT: vsll.vx v26, v26, a1 -; CHECK-NEXT: vsrl.vx v26, v26, a1 -; 
CHECK-NEXT: vor.vv v25, v26, v25 -; CHECK-NEXT: vssrl.vv v8, v8, v25 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i64.i64( + %a = call @llvm.riscv.vssrl.nxv1i64( %0, - i64 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i64.i64( +declare @llvm.riscv.vssrl.mask.nxv1i64( , , - i64, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu -; CHECK-NEXT: vmv.v.x v25, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v25, v25, a1 -; CHECK-NEXT: vmv.v.x v26, a0 -; CHECK-NEXT: vsll.vx v26, v26, a1 -; CHECK-NEXT: vsrl.vx v26, v26, a1 -; CHECK-NEXT: vor.vv v25, v26, v25 -; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu -; CHECK-NEXT: vssrl.vv v8, v9, v25, v0.t +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv1i64( %0, %1, - i64 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv2i64.i64( +declare @llvm.riscv.vssrl.nxv2i64( , - i64, + i32, i32); -define @intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vssrl_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu -; CHECK-NEXT: vmv.v.x v26, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v26, v26, a1 -; CHECK-NEXT: vmv.v.x v28, a0 -; CHECK-NEXT: vsll.vx v28, v28, a1 -; CHECK-NEXT: vsrl.vx v28, v28, a1 -; CHECK-NEXT: vor.vv v26, v28, v26 -; CHECK-NEXT: vssrl.vv v8, v8, v26 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i64.i64( + %a = call @llvm.riscv.vssrl.nxv2i64( %0, - i64 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv2i64.i64( +declare @llvm.riscv.vssrl.mask.nxv2i64( , , - i64, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu -; CHECK-NEXT: vmv.v.x v26, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v26, v26, a1 -; CHECK-NEXT: vmv.v.x v28, a0 -; CHECK-NEXT: vsll.vx v28, v28, a1 -; CHECK-NEXT: vsrl.vx v28, v28, a1 -; CHECK-NEXT: vor.vv v26, v28, v26 -; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu -; CHECK-NEXT: vssrl.vv v8, v10, v26, v0.t +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv2i64( %0, %1, - i64 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv4i64.i64( +declare @llvm.riscv.vssrl.nxv4i64( , - i64, + i32, i32); -define @intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64( %0, 
i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vssrl_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu -; CHECK-NEXT: vmv.v.x v28, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v28, v28, a1 -; CHECK-NEXT: vmv.v.x v12, a0 -; CHECK-NEXT: vsll.vx v12, v12, a1 -; CHECK-NEXT: vsrl.vx v12, v12, a1 -; CHECK-NEXT: vor.vv v28, v12, v28 -; CHECK-NEXT: vssrl.vv v8, v8, v28 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i64.i64( + %a = call @llvm.riscv.vssrl.nxv4i64( %0, - i64 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv4i64.i64( +declare @llvm.riscv.vssrl.mask.nxv4i64( , , - i64, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu -; CHECK-NEXT: vmv.v.x v28, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v28, v28, a1 -; CHECK-NEXT: vmv.v.x v16, a0 -; CHECK-NEXT: vsll.vx v16, v16, a1 -; CHECK-NEXT: vsrl.vx v16, v16, a1 -; CHECK-NEXT: vor.vv v28, v16, v28 -; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu -; CHECK-NEXT: vssrl.vv v8, v12, v28, v0.t +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv4i64( %0, %1, - i64 %2, + i32 %2, %3, i32 %4) ret %a } -declare @llvm.riscv.vssrl.nxv8i64.i64( +declare @llvm.riscv.vssrl.nxv8i64( , - i64, + i32, i32); -define @intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vssrl_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.x v16, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v16, v16, a1 -; CHECK-NEXT: vmv.v.x v24, a0 -; CHECK-NEXT: vsll.vx v24, v24, a1 -; CHECK-NEXT: vsrl.vx v24, v24, a1 -; CHECK-NEXT: vor.vv v16, v24, v16 -; CHECK-NEXT: vssrl.vv v8, v8, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i64.i64( + %a = call @llvm.riscv.vssrl.nxv8i64( %0, - i64 %1, + i32 %1, i32 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv8i64.i64( +declare @llvm.riscv.vssrl.mask.nxv8i64( , , - i64, + i32, , i32); -define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: csrrs a3, vlenb, zero -; CHECK-NEXT: sub sp, sp, a3 -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.x v24, a1 -; CHECK-NEXT: addi a1, zero, 32 -; CHECK-NEXT: vsll.vx v0, v24, a1 -; CHECK-NEXT: vmv.v.x v24, a0 -; CHECK-NEXT: vsll.vx v24, v24, a1 -; CHECK-NEXT: vsrl.vx v24, v24, a1 -; 
CHECK-NEXT: vor.vv v24, v24, v0 -; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t -; CHECK-NEXT: csrrs a0, vlenb, zero -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: addi sp, sp, 16 +define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu +; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv8i64( %0, %1, - i64 %2, + i32 %2, %3, i32 %4) @@ -2018,9 +1771,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i8.i8( + %a = call @llvm.riscv.vssrl.nxv1i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2033,10 +1786,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv1i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2050,9 +1803,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i8.i8( + %a = call @llvm.riscv.vssrl.nxv2i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2065,10 +1818,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv2i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2082,9 +1835,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i8.i8( + %a = call @llvm.riscv.vssrl.nxv4i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2097,10 +1850,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv4i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2114,9 +1867,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i8.i8( + %a = call @llvm.riscv.vssrl.nxv8i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2129,10 +1882,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv8i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2146,9 +1899,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i8.i8( + %a = call @llvm.riscv.vssrl.nxv16i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2161,10 +1914,10 @@ ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv16i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2178,9 +1931,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv32i8.i8( + %a = call @llvm.riscv.vssrl.nxv32i8( %0, - i8 9, + i32 9, i32 %1) ret %a @@ -2193,10 +1946,10 @@ ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv32i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2210,9 +1963,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv64i8.i8( + %a = call @llvm.riscv.vssrl.nxv64i8( %0, - i8 9, + i32 9, 
i32 %1) ret %a @@ -2225,10 +1978,10 @@ ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv64i8( %0, %1, - i8 9, + i32 9, %2, i32 %3) @@ -2242,9 +1995,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i16.i16( + %a = call @llvm.riscv.vssrl.nxv1i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2257,10 +2010,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv1i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2274,9 +2027,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i16.i16( + %a = call @llvm.riscv.vssrl.nxv2i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2289,10 +2042,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv2i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2306,9 +2059,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i16.i16( + %a = call @llvm.riscv.vssrl.nxv4i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2321,10 +2074,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv4i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2338,9 +2091,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i16.i16( + %a = call @llvm.riscv.vssrl.nxv8i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2353,10 +2106,10 @@ ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv8i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2370,9 +2123,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i16.i16( + %a = call @llvm.riscv.vssrl.nxv16i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2385,10 +2138,10 @@ ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv16i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2402,9 +2155,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv32i16.i16( + %a = call @llvm.riscv.vssrl.nxv32i16( %0, - i16 9, + i32 9, i32 %1) ret %a @@ -2417,10 +2170,10 @@ ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv32i16( %0, %1, - i16 9, + i32 9, %2, i32 %3) @@ -2434,7 +2187,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i32.i32( + %a = call @llvm.riscv.vssrl.nxv1i32( %0, i32 9, i32 %1) @@ -2449,7 +2202,7 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv1i32( %0, %1, i32 9, @@ -2466,7 +2219,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i32.i32( + %a = call @llvm.riscv.vssrl.nxv2i32( %0, i32 9, i32 %1) @@ -2481,7 +2234,7 @@ ; CHECK-NEXT: vssrl.vi 
v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv2i32( %0, %1, i32 9, @@ -2498,7 +2251,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i32.i32( + %a = call @llvm.riscv.vssrl.nxv4i32( %0, i32 9, i32 %1) @@ -2513,7 +2266,7 @@ ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv4i32( %0, %1, i32 9, @@ -2530,7 +2283,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i32.i32( + %a = call @llvm.riscv.vssrl.nxv8i32( %0, i32 9, i32 %1) @@ -2545,7 +2298,7 @@ ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv8i32( %0, %1, i32 9, @@ -2562,7 +2315,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i32.i32( + %a = call @llvm.riscv.vssrl.nxv16i32( %0, i32 9, i32 %1) @@ -2577,7 +2330,7 @@ ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv16i32( %0, %1, i32 9, @@ -2586,131 +2339,3 @@ ret %a } - -define @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.nxv1i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu -; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.nxv2i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu -; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.nxv4i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu -; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t -; CHECK-NEXT: jalr 
zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu -; CHECK-NEXT: vssrl.vi v8, v8, 9 -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.nxv8i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu -; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t -; CHECK-NEXT: jalr zero, 0(ra) -entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -973,811 +973,811 @@ ret %a } -declare @llvm.riscv.vssrl.nxv1i8.i8( +declare @llvm.riscv.vssrl.nxv1i8( , - i8, + i64, i64); -define @intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vssrl_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i8.i8( + %a = call @llvm.riscv.vssrl.nxv1i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.i8( +declare @llvm.riscv.vssrl.mask.nxv1i8( , , - i8, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8: +define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv1i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv2i8.i8( +declare @llvm.riscv.vssrl.nxv2i8( , - i8, + i64, i64); -define @intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vssrl_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i8.i8( + %a = call @llvm.riscv.vssrl.nxv2i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv2i8.i8( +declare @llvm.riscv.vssrl.mask.nxv2i8( , , - i8, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8: +define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = 
call @llvm.riscv.vssrl.mask.nxv2i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv2i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv4i8.i8( +declare @llvm.riscv.vssrl.nxv4i8( , - i8, + i64, i64); -define @intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vssrl_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i8.i8( + %a = call @llvm.riscv.vssrl.nxv4i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv4i8.i8( +declare @llvm.riscv.vssrl.mask.nxv4i8( , , - i8, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8: +define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv4i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv8i8.i8( +declare @llvm.riscv.vssrl.nxv8i8( , - i8, + i64, i64); -define @intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vssrl_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i8.i8( + %a = call @llvm.riscv.vssrl.nxv8i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv8i8.i8( +declare @llvm.riscv.vssrl.mask.nxv8i8( , , - i8, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8: +define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv8i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv16i8.i8( +declare @llvm.riscv.vssrl.nxv16i8( , - i8, + i64, i64); -define @intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vssrl_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i8.i8( + %a = call @llvm.riscv.vssrl.nxv16i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv16i8.i8( +declare @llvm.riscv.vssrl.mask.nxv16i8( , , - i8, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8: +define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv16i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv32i8.i8( +declare @llvm.riscv.vssrl.nxv32i8( , - i8, + i64, i64); -define @intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vssrl_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv32i8.i8( + %a = call @llvm.riscv.vssrl.nxv32i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv32i8.i8( +declare @llvm.riscv.vssrl.mask.nxv32i8( , , - i8, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8: +define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv32i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv64i8.i8( +declare @llvm.riscv.vssrl.nxv64i8( , - i8, + i64, i64); -define @intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vssrl_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv64i8.i8( + %a = call @llvm.riscv.vssrl.nxv64i8( %0, - i8 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv64i8.i8( +declare @llvm.riscv.vssrl.mask.nxv64i8( , , - i8, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8: +define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv64i8( %0, %1, - i8 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv1i16.i16( +declare @llvm.riscv.vssrl.nxv1i16( , - i16, + i64, i64); -define @intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vssrl_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu ; CHECK-NEXT: 
vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i16.i16( + %a = call @llvm.riscv.vssrl.nxv1i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i16.i16( +declare @llvm.riscv.vssrl.mask.nxv1i16( , , - i16, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16: +define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv1i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv2i16.i16( +declare @llvm.riscv.vssrl.nxv2i16( , - i16, + i64, i64); -define @intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vssrl_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i16.i16( + %a = call @llvm.riscv.vssrl.nxv2i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv2i16.i16( +declare @llvm.riscv.vssrl.mask.nxv2i16( , , - i16, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16: +define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv2i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv4i16.i16( +declare @llvm.riscv.vssrl.nxv4i16( , - i16, + i64, i64); -define @intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vssrl_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i16.i16( + %a = call @llvm.riscv.vssrl.nxv4i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv4i16.i16( +declare @llvm.riscv.vssrl.mask.nxv4i16( , , - i16, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16: +define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv4i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare 
@llvm.riscv.vssrl.nxv8i16.i16( +declare @llvm.riscv.vssrl.nxv8i16( , - i16, + i64, i64); -define @intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vssrl_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i16.i16( + %a = call @llvm.riscv.vssrl.nxv8i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv8i16.i16( +declare @llvm.riscv.vssrl.mask.nxv8i16( , , - i16, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16: +define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv8i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv16i16.i16( +declare @llvm.riscv.vssrl.nxv16i16( , - i16, + i64, i64); -define @intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vssrl_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i16.i16( + %a = call @llvm.riscv.vssrl.nxv16i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv16i16.i16( +declare @llvm.riscv.vssrl.mask.nxv16i16( , , - i16, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16: +define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv16i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv32i16.i16( +declare @llvm.riscv.vssrl.nxv32i16( , - i16, + i64, i64); -define @intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vssrl_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv32i16.i16( + %a = call @llvm.riscv.vssrl.nxv32i16( %0, - i16 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv32i16.i16( +declare @llvm.riscv.vssrl.mask.nxv32i16( , , - i16, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16: +define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv32i16( %0, %1, - i16 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv1i32.i32( +declare @llvm.riscv.vssrl.nxv1i32( , - i32, + i64, i64); -define @intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vssrl_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i32.i32( + %a = call @llvm.riscv.vssrl.nxv1i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i32.i32( +declare @llvm.riscv.vssrl.mask.nxv1i32( , , - i32, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32: +define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv1i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv2i32.i32( +declare @llvm.riscv.vssrl.nxv2i32( , - i32, + i64, i64); -define @intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vssrl_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i32.i32( + %a = call @llvm.riscv.vssrl.nxv2i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv2i32.i32( +declare @llvm.riscv.vssrl.mask.nxv2i32( , , - i32, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32: +define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv2i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv4i32.i32( +declare @llvm.riscv.vssrl.nxv4i32( , - i32, + i64, i64); -define @intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vssrl_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli a1, a1, e32,m2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i32.i32( + %a = call @llvm.riscv.vssrl.nxv4i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv4i32.i32( +declare @llvm.riscv.vssrl.mask.nxv4i32( , , - i32, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32: +define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv4i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv8i32.i32( +declare @llvm.riscv.vssrl.nxv8i32( , - i32, + i64, i64); -define @intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vssrl_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i32.i32( + %a = call @llvm.riscv.vssrl.nxv8i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv8i32.i32( +declare @llvm.riscv.vssrl.mask.nxv8i32( , , - i32, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32: +define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv8i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv16i32.i32( +declare @llvm.riscv.vssrl.nxv16i32( , - i32, + i64, i64); -define @intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vssrl_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i32.i32( + %a = call @llvm.riscv.vssrl.nxv16i32( %0, - i32 %1, + i64 %1, i64 %2) ret %a } -declare @llvm.riscv.vssrl.mask.nxv16i32.i32( +declare @llvm.riscv.vssrl.mask.nxv16i32( , , - i32, + i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32: +define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32.i32( + %a = call 
@llvm.riscv.vssrl.mask.nxv16i32( %0, %1, - i32 %2, + i64 %2, %3, i64 %4) ret %a } -declare @llvm.riscv.vssrl.nxv1i64.i64( +declare @llvm.riscv.vssrl.nxv1i64( , i64, i64); -define @intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vssrl_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i64.i64( + %a = call @llvm.riscv.vssrl.nxv1i64( %0, i64 %1, i64 %2) @@ -1785,21 +1785,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i64.i64( +declare @llvm.riscv.vssrl.mask.nxv1i64( , , i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64: +define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv1i64( %0, %1, i64 %2, @@ -1809,19 +1809,19 @@ ret %a } -declare @llvm.riscv.vssrl.nxv2i64.i64( +declare @llvm.riscv.vssrl.nxv2i64( , i64, i64); -define @intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vssrl_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i64.i64( + %a = call @llvm.riscv.vssrl.nxv2i64( %0, i64 %1, i64 %2) @@ -1829,21 +1829,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv2i64.i64( +declare @llvm.riscv.vssrl.mask.nxv2i64( , , i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64: +define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv2i64( %0, %1, i64 %2, @@ -1853,19 +1853,19 @@ ret %a } -declare @llvm.riscv.vssrl.nxv4i64.i64( +declare @llvm.riscv.vssrl.nxv4i64( , i64, i64); -define @intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vssrl_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i64.i64( + %a = call @llvm.riscv.vssrl.nxv4i64( %0, i64 %1, i64 %2) @@ -1873,21 +1873,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv4i64.i64( +declare @llvm.riscv.vssrl.mask.nxv4i64( , , i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { 
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64: +define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv4i64( %0, %1, i64 %2, @@ -1897,19 +1897,19 @@ ret %a } -declare @llvm.riscv.vssrl.nxv8i64.i64( +declare @llvm.riscv.vssrl.nxv8i64( , i64, i64); -define @intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vssrl_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i64.i64( + %a = call @llvm.riscv.vssrl.nxv8i64( %0, i64 %1, i64 %2) @@ -1917,21 +1917,21 @@ ret %a } -declare @llvm.riscv.vssrl.mask.nxv8i64.i64( +declare @llvm.riscv.vssrl.mask.nxv8i64( , , i64, , i64); -define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64: +define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv8i64( %0, %1, i64 %2, @@ -1948,9 +1948,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i8.i8( + %a = call @llvm.riscv.vssrl.nxv1i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1963,10 +1963,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv1i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -1980,9 +1980,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i8.i8( + %a = call @llvm.riscv.vssrl.nxv2i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -1995,10 +1995,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv2i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2012,9 +2012,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i8.i8( + %a = call @llvm.riscv.vssrl.nxv4i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2027,10 +2027,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv4i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2044,9 +2044,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i8.i8( + %a = call @llvm.riscv.vssrl.nxv8i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2059,10 +2059,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv8i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2076,9 +2076,9 @@ ; 
CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i8.i8( + %a = call @llvm.riscv.vssrl.nxv16i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2091,10 +2091,10 @@ ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv16i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2108,9 +2108,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv32i8.i8( + %a = call @llvm.riscv.vssrl.nxv32i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2123,10 +2123,10 @@ ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv32i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2140,9 +2140,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv64i8.i8( + %a = call @llvm.riscv.vssrl.nxv64i8( %0, - i8 9, + i64 9, i64 %1) ret %a @@ -2155,10 +2155,10 @@ ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv64i8.i8( + %a = call @llvm.riscv.vssrl.mask.nxv64i8( %0, %1, - i8 9, + i64 9, %2, i64 %3) @@ -2172,9 +2172,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i16.i16( + %a = call @llvm.riscv.vssrl.nxv1i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2187,10 +2187,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv1i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2204,9 +2204,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i16.i16( + %a = call @llvm.riscv.vssrl.nxv2i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2219,10 +2219,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv2i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2236,9 +2236,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i16.i16( + %a = call @llvm.riscv.vssrl.nxv4i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2251,10 +2251,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv4i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2268,9 +2268,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i16.i16( + %a = call @llvm.riscv.vssrl.nxv8i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2283,10 +2283,10 @@ ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv8i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2300,9 +2300,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i16.i16( + %a = call @llvm.riscv.vssrl.nxv16i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2315,10 +2315,10 @@ ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv16i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2332,9 +2332,9 @@ ; CHECK-NEXT: 
vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv32i16.i16( + %a = call @llvm.riscv.vssrl.nxv32i16( %0, - i16 9, + i64 9, i64 %1) ret %a @@ -2347,10 +2347,10 @@ ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv32i16.i16( + %a = call @llvm.riscv.vssrl.mask.nxv32i16( %0, %1, - i16 9, + i64 9, %2, i64 %3) @@ -2364,9 +2364,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i32.i32( + %a = call @llvm.riscv.vssrl.nxv1i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2379,10 +2379,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv1i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2396,9 +2396,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i32.i32( + %a = call @llvm.riscv.vssrl.nxv2i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2411,10 +2411,10 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv2i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2428,9 +2428,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv4i32.i32( + %a = call @llvm.riscv.vssrl.nxv4i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2443,10 +2443,10 @@ ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv4i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2460,9 +2460,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i32.i32( + %a = call @llvm.riscv.vssrl.nxv8i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2475,10 +2475,10 @@ ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv8i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2492,9 +2492,9 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv16i32.i32( + %a = call @llvm.riscv.vssrl.nxv16i32( %0, - i32 9, + i64 9, i64 %1) ret %a @@ -2507,10 +2507,10 @@ ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv16i32.i32( + %a = call @llvm.riscv.vssrl.mask.nxv16i32( %0, %1, - i32 9, + i64 9, %2, i64 %3) @@ -2524,7 +2524,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv1i64.i64( + %a = call @llvm.riscv.vssrl.nxv1i64( %0, i64 9, i64 %1) @@ -2539,7 +2539,7 @@ ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv1i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv1i64( %0, %1, i64 9, @@ -2556,7 +2556,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv2i64.i64( + %a = call @llvm.riscv.vssrl.nxv2i64( %0, i64 9, i64 %1) @@ -2571,7 +2571,7 @@ ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv2i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv2i64( %0, %1, i64 9, @@ -2588,7 +2588,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call 
@llvm.riscv.vssrl.nxv4i64.i64( + %a = call @llvm.riscv.vssrl.nxv4i64( %0, i64 9, i64 %1) @@ -2603,7 +2603,7 @@ ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv4i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv4i64( %0, %1, i64 9, @@ -2620,7 +2620,7 @@ ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.nxv8i64.i64( + %a = call @llvm.riscv.vssrl.nxv8i64( %0, i64 9, i64 %1) @@ -2635,7 +2635,7 @@ ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call @llvm.riscv.vssrl.mask.nxv8i64.i64( + %a = call @llvm.riscv.vssrl.mask.nxv8i64( %0, %1, i64 9,
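The vssrl hunks above all apply the same two-part change: the `llvm.riscv.vssrl` intrinsic names drop their trailing scalar-type suffix (`.i8`/`.i16`/`.i32`/`.i64`), and the scalar shift-amount operand is widened to the XLEN integer type (i64 here, given the i64 VL operand), while the emitted `vssrl.vx`/`vssrl.vi` instructions and CHECK lines are unchanged. As a minimal sketch of the resulting shape for one element width only: the `<vscale x 4 x i32>` and `<vscale x 4 x i1>` types below are reconstructed from the mangled names (the extraction above dropped the angle-bracketed types), and `@example_vssrl_vx` is a hypothetical caller for illustration, not a test from this diff.

; Hedged sketch of the post-change unmasked and masked nxv4i32 declarations
; (operand order as in the hunks above: [maskedoff,] vector, scalar shift, [mask,] VL).
declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
  <vscale x 4 x i32>,   ; vector operand
  i64,                  ; shift amount, now XLEN-sized (previously i32)
  i64)                  ; VL

declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
  <vscale x 4 x i32>,   ; maskedoff operand
  <vscale x 4 x i32>,   ; vector operand
  i64,                  ; shift amount, XLEN-sized
  <vscale x 4 x i1>,    ; mask
  i64)                  ; VL

; Hypothetical caller showing how the unmasked form is invoked after the rename.
define <vscale x 4 x i32> @example_vssrl_vx(<vscale x 4 x i32> %v, i64 %shamt, i64 %vl) {
entry:
  %r = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
    <vscale x 4 x i32> %v,
    i64 %shamt,
    i64 %vl)
  ret <vscale x 4 x i32> %r
}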