diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -455,6 +455,8 @@
   defm vssubu : RISCVSaturatingBinaryAAX;
   defm vssub : RISCVSaturatingBinaryAAX;
 
+  def int_riscv_vmerge : RISCVBinaryWithV0;
+
   def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
@@ -463,7 +465,6 @@
                                     [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 1;
   }
-
   def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
@@ -492,6 +493,8 @@
   defm vfsgnjn : RISCVBinaryAAX;
   defm vfsgnjx : RISCVBinaryAAX;
 
+  defm vfmerge : RISCVBinaryWithV0;
+
   defm vslideup : RISCVTernaryAAAX;
   defm vslidedown : RISCVTernaryAAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -749,13 +749,14 @@
 }
 
 multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
-                             string Constraint = ""> {
+                             string Constraint = "", bit IsFloat = 0> {
   foreach m = MxList.m in
-    def "_VX" # !if(!eq(CarryIn, 1), "M", "") # "_" # m.MX :
+    def !if(!eq(IsFloat, 0), "_VX", "_VF") # !if(!eq(CarryIn, 1), "M", "") # "_" # m.MX :
       VPseudoBinaryCarryIn<!if(!and(!eq(CarryOut, 1), !eq(CarryIn, 1)),
                                GetVRegNoV0<m.vrclass>.R, m.vrclass),
-                           m.vrclass, GPR, m, CarryIn, Constraint>;
+                           m.vrclass, !if(!eq(IsFloat, 0), GPR, FPR32),
+                           m, CarryIn, Constraint>;
 }
 
 multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
@@ ... @@
 multiclass VPatBinaryV_VM<string intrinsic, string instruction,
-                          bit CarryOut = 0> {
-  foreach vti = AllIntegerVectors in
+                          bit CarryOut = 0,
+                          list<VTypeInfo> vtilist = AllIntegerVectors> {
+  foreach vti = vtilist in
     defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
                              !if(CarryOut, vti.Mask, vti.Vector),
                              vti.Vector, vti.Vector, vti.Mask,
                              vti.SEW, vti.LMul,
                              vti.RegClass, vti.RegClass>;
 }
 
 multiclass VPatBinaryV_XM<string intrinsic, string instruction,
-                          bit CarryOut = 0> {
-  foreach vti = AllIntegerVectors in
-    defm : VPatBinaryCarryIn<intrinsic, instruction, "VXM",
-                             !if(CarryOut, vti.Mask, vti.Vector),
-                             vti.Vector, vti.Scalar, vti.Mask,
-                             vti.SEW, vti.LMul,
-                             vti.RegClass, GPR>;
+                          bit CarryOut = 0,
+                          list<VTypeInfo> vtilist = AllIntegerVectors> {
+  foreach vti = vtilist in
+    defm : VPatBinaryCarryIn<intrinsic, instruction, "VXM",
+                             !if(CarryOut, vti.Mask, vti.Vector),
+                             vti.Vector, vti.Scalar, vti.Mask,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, vti.ScalarRegClass>;
 }
 
 multiclass VPatBinaryV_IM<string intrinsic, string instruction,
@@ ... @@
 defm PseudoVFSGNJX : VPseudoBinaryV_VV_VX;
 
+//===----------------------------------------------------------------------===//
+// 14.15. Vector Floating-Point Merge Instruction
+//===----------------------------------------------------------------------===//
+defm PseudoVFMERGE : VPseudoBinaryV_XM</*CarryOut =*/0, /*CarryIn =*/1, "",
+                                       /*IsFloat =*/1>;
+
 } // Predicates = [HasStdExtV, HasStdExtF]
 
 //===----------------------------------------------------------------------===//
@@ -2030,6 +2044,10 @@
 defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
 defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
 
+// 12.15. Vector Integer Merge Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
+
 //===----------------------------------------------------------------------===//
 // 12.17. Vector Integer Move Instructions
 //===----------------------------------------------------------------------===//
@@ -2121,6 +2139,15 @@
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;
 
+//===----------------------------------------------------------------------===//
+// 14.15. Vector Floating-Point Merge Instruction
+//===----------------------------------------------------------------------===//
+// We can use vmerge.vvm to support vector-vector vfmerge.
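+// The V extension has no vector-vector vfmerge encoding (section 14.15 of
+// the spec defines only the vector-scalar form, vfmerge.vfm), and a masked
+// merge of two vector registers is element-type agnostic, so the
+// vector-vector form of int_riscv_vfmerge reuses the integer vmerge.vvm
+// pseudos and patterns. As a rough sketch (operand names are illustrative,
+// not taken from the patch), a call such as:
+//   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32(
+//          <vscale x 1 x float> %va, <vscale x 1 x float> %vb,
+//          <vscale x 1 x i1> %mask, i32 %vl)
+// is expected to select to:
+//   vsetvli ..., e32,mf2
+//   vmerge.vvm v..., v..., v..., v0
+// while only the vector-scalar intrinsic form selects to vfmerge.vfm.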
+defm "" : VPatBinaryV_VM<"int_riscv_vfmerge", "PseudoVMERGE", + /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; +defm "" : VPatBinaryV_XM<"int_riscv_vfmerge", "PseudoVFMERGE", + /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; + } // Predicates = [HasStdExtV, HasStdExtF] //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll @@ -0,0 +1,441 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s + +declare @llvm.riscv.vfmerge.nxv1f16.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f16.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f16.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f16.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f16.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f16.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f16.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f16.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16f16.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv16f16.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv32f16.nxv32f16( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv32f16.nxv32f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv1f32.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f32.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f32.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f32.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16f32.nxv16f32( + , + , + , + i32); + +define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} +declare @llvm.riscv.vfmerge.nxv1f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call 
@llvm.riscv.vfmerge.nxv8f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv16f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv32f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv32f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv1f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv16f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll @@ -0,0 +1,601 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s + +declare @llvm.riscv.vfmerge.nxv1f16.nxv1f16( + , + , + , + i64); + 
+define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f16.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f16.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f16.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f16.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f16.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f16.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f16.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16f16.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv16f16.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv32f16.nxv32f16( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv32f16.nxv32f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv1f32.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f32.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f32.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f32.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16f32.nxv16f32( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv1f64.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f64.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f64.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f64.nxv8f64( + , + , + , + i64); + +define @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} +declare @llvm.riscv.vfmerge.nxv1f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, 
{{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv16f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv32f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv32f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv1f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f32.f32( + 
%0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv16f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv1f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv1f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv2f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv4f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8 +; CHECK: vfmerge.vfm {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0 + %a = call @llvm.riscv.vfmerge.nxv8f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll @@ -0,0 +1,973 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmerge.nxv1i8.nxv1i8( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i8.nxv1i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i8.nxv2i8( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i8.nxv2i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i8.nxv4i8( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i8.nxv4i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i8.nxv8i8( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i8.nxv8i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i8.nxv16i8( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i8.nxv16i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv32i8.nxv32i8( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv32i8.nxv32i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv64i8.nxv64i8( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv64i8.nxv64i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i16.nxv1i16( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i16.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i16.nxv2i16( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i16.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i16.nxv4i16( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i16.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i16.nxv8i16( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call 
@llvm.riscv.vmerge.nxv8i16.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i16.nxv16i16( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i16.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv32i16.nxv32i16( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv32i16.nxv32i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i32.nxv1i32( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i32.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i32.nxv2i32( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i32.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i32.nxv4i32( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i32.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i32.nxv8i32( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i32.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i32.nxv16i32( + , + , + , + i32); + +define @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i32.nxv16i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i8.i8( + , + i8, + , + i32); + +define 
@intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv32i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv32i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv64i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv64i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call 
@llvm.riscv.vmerge.nxv4i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv32i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv32i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv1i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv2i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv4i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv8i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv16i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv32i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv64i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv1i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv2i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv4i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv8i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define 
@intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv16i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv32i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv1i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv2i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv4i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv8i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8 +; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmerge.nxv16i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll @@ -0,0 +1,1189 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmerge.nxv1i8.nxv1i8( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i8.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i8.nxv2i8( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i8.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i8.nxv4i8( + , + , + , + 
i64); + +define @intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i8.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i8.nxv8i8( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i8.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i8.nxv16i8( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i8.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv32i8.nxv32i8( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv32i8.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv64i8.nxv64i8( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv64i8.nxv64i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i16.nxv1i16( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i16.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i16.nxv2i16( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i16.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i16.nxv4i16( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i16.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i16.nxv8i16( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i16.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i16.nxv16i16( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i16.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv32i16.nxv32i16( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv32i16.nxv32i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i32.nxv1i32( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i32.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i32.nxv2i32( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i32.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i32.nxv4i32( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i32.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i32.nxv8i32( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i32.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i32.nxv16i32( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i32.nxv16i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i64.nxv1i64( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i64.nxv1i64( + %0, + 
%1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i64.nxv2i64( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i64.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i64.nxv4i64( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i64.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i64.nxv8i64( + , + , + , + i64); + +define @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8 +; CHECK: vmerge.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i64.nxv8i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv1i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv2i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv2i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv4i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv4i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv8i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv16i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2 +; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmerge.nxv16i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv32i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8 +; 
+declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8
+; CHECK: vmerge.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
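+; Tests for the vector-immediate form, vmerge.vim. These reuse the .iN
+; declarations above with the constant 9, which fits the instruction's
+; 5-bit signed immediate, so the pattern selects vmerge.vim rather than
+; materializing the constant in a register.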
+define <vscale x 1 x i8> @intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8
+; CHECK: vmerge.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}