diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -293,6 +293,20 @@
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  class RISCVTernaryAAXANoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
+                     llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  class RISCVTernaryAAXAMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
 
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
@@ -343,6 +357,10 @@
     def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
   }
+  multiclass RISCVTernaryAAXA {
+    def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
+  }
 
   defm vle : RISCVUSLoad;
   defm vse : RISCVUSStore;
@@ -399,6 +417,11 @@
   defm vwmulu : RISCVBinaryABX;
   defm vwmulsu : RISCVBinaryABX;
 
+  defm vmacc : RISCVTernaryAAXA;
+  defm vnmsac : RISCVTernaryAAXA;
+  defm vmadd : RISCVTernaryAAXA;
+  defm vnmsub : RISCVTernaryAAXA;
+
   defm vfadd : RISCVBinaryAAX;
   defm vfsub : RISCVBinaryAAX;
   defm vfrsub : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -854,16 +854,31 @@
   }
 }
 
+multiclass VPseudoTernaryV_VV {
+  foreach m = MxList.m in
+    defm _VV : VPseudoTernary<m.vrclass, m.vrclass, m.vrclass, m>;
+}
+
 multiclass VPseudoTernaryV_VX {
   foreach m = MxList.m in
     defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m>;
 }
 
+multiclass VPseudoTernaryV_VX_AAXA {
+  foreach m = MxList.m in
+    defm _VX : VPseudoTernary<m.vrclass, GPR, m.vrclass, m>;
+}
+
 multiclass VPseudoTernaryV_VI {
   foreach m = MxList.m in
     defm _VI : VPseudoTernary<m.vrclass, m.vrclass, simm5, m>;
 }
 
+multiclass VPseudoTernaryV_VV_VX_AAXA {
+  defm "" : VPseudoTernaryV_VV;
+  defm "" : VPseudoTernaryV_VX_AAXA;
+}
+
 multiclass VPseudoTernaryV_VX_VI {
   defm "" : VPseudoTernaryV_VX;
   defm "" : VPseudoTernaryV_VI;
@@ -1475,6 +1490,15 @@
                      op2_kind>;
 }
 
+multiclass VPatTernaryV_VV<string intrinsic, string instruction,
+                           list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatTernary<intrinsic, instruction, "VV",
+                       vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                       vti.SEW, vti.LMul, vti.RegClass,
+                       vti.RegClass, vti.RegClass>;
+}
+
 multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                            list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -1484,6 +1508,15 @@
                        vti.RegClass, GPR>;
 }
 
+multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
+                                list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatTernary<intrinsic, instruction, "VX",
+                       vti.Vector, XLenVT, vti.Vector, vti.Mask,
+                       vti.SEW, vti.LMul, vti.RegClass,
+                       GPR, vti.RegClass>;
+}
+
 multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                            list<VTypeInfo> vtilist, Operand Imm_type> {
   foreach vti = vtilist in
@@ -1493,6 +1526,12 @@
                        vti.RegClass, Imm_type>;
 }
 
+multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
+                                   list<VTypeInfo> vtilist> {
+  defm "" : VPatTernaryV_VV<intrinsic, instruction, vtilist>;
+  defm "" : VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;
+}
+
 multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
                               list<VTypeInfo> vtilist, Operand Imm_type = simm5> {
   defm "" : VPatTernaryV_VX<intrinsic, instruction, vtilist>;
@@ -1643,6 +1682,14 @@
 defm PseudoVWMULSU : VPseudoBinaryW_VV_VX;
 
 //===----------------------------------------------------------------------===//
+// 12.13. Vector Single-Width Integer Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVMACC : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVNMSAC : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVMADD : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVNMSUB : VPseudoTernaryV_VV_VX_AAXA;
+
+//===----------------------------------------------------------------------===//
 // 12.17. Vector Integer Move Instructions
 //===----------------------------------------------------------------------===//
@@ -1929,6 +1976,14 @@
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;
 
 //===----------------------------------------------------------------------===//
+// 12.13. Vector Single-Width Integer Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
+
+//===----------------------------------------------------------------------===//
 // 12.17. Vector Integer Move Instructions
 //===----------------------------------------------------------------------===//
 foreach vti = AllVectors in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
@@ -0,0 +1,1261 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
+ , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i8.nxv4i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i8.nxv8i8( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i8.nxv8i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv16i8.nxv16i8( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv16i8.nxv16i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv32i8.nxv32i8( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv32i8.nxv32i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i16.nxv1i16( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i16.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i16.nxv2i16( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i16.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i16.nxv4i16( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i16.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i16.nxv8i16( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i16.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv16i16.nxv16i16( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv16i16.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i32.nxv1i32( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i32.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i32.nxv2i32( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i32.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i32.nxv4i32( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i32.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i32.nxv8i32( + , + , + , + i32); + +define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i32.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + 
+declare @llvm.riscv.vmacc.mask.nxv8i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv16i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv16i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv16i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv16i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv32i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv32i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv32i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv32i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv16i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv16i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv16i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv16i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a 
= call @llvm.riscv.vmacc.mask.nxv1i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll @@ -0,0 +1,1513 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmacc.nxv1i8.nxv1i8( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i8.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i8.nxv2i8( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i8.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i8.nxv4i8( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i8.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i8.nxv8i8( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i8.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv16i8.nxv16i8( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv16i8.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmacc.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv32i8.nxv32i8( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv32i8.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i16.nxv1i16( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i16.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i16.nxv2i16( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i16.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i16.nxv4i16( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i16.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i16.nxv8i16( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i16.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv16i16.nxv16i16( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv16i16.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i32.nxv1i32( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i32.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i32.nxv2i32( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i32.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i32.nxv4i32( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i32.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i32.nxv8i32( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i32.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i64.nxv1i64( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i64.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i64.nxv2i64( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i64.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, 
+ %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i64.nxv4i64( + , + , + , + i64); + +define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i64.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv16i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv16i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv16i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv16i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv32i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv32i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv32i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv32i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + 
ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv16i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv16i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv16i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv16i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i32.i32( + , + i32, + , + i64); + +define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, 
%3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i32.i32( + , + i32, + , + i64); + +define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i32.i32( + , + i32, + , + i64); + +define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv8i32.i32( + , + i32, + , + i64); + +define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv8i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv8i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv8i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv1i64.i64( + , + i64, + , + i64); + +define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv1i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv1i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmacc.vx 
{{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv1i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv2i64.i64( + , + i64, + , + i64); + +define @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv2i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv2i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv2i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmacc.nxv4i64.i64( + , + i64, + , + i64); + +define @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmacc.nxv4i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmacc.mask.nxv4i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmacc.mask.nxv4i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll @@ -0,0 +1,1261 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmadd.nxv1i8.nxv1i8( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i8.nxv2i8( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i8.nxv2i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, 
%3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i8.nxv4i8( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i8.nxv4i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i8.nxv8i8( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i8.nxv8i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv16i8.nxv16i8( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv16i8.nxv16i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv32i8.nxv32i8( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv32i8.nxv32i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i16.nxv1i16( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i16.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i16.nxv2i16( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i16.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i16.nxv4i16( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i16.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i16.nxv8i16( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i16.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: 
vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv16i16.nxv16i16( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv16i16.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i32.nxv1i32( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i32.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i32.nxv2i32( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i32.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i32.nxv4i32( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i32.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i32.nxv8i32( + , + , + , + i32); + +define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i32.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i8.i8( + , + i8, + , + i32); + +define 
@intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv16i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv16i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv16i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv16i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv32i8.i8( + , + i8, + , + i32); + +define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv32i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv32i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv32i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, 
a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv16i16.i16( + , + i16, + , + i32); + +define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv16i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv16i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv16i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i32.i32( + , 
+ i32, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i32.i32( + , + i32, + , + i32); + +define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll @@ -0,0 +1,1513 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmadd.nxv1i8.nxv1i8( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i8.nxv2i8( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i8.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i8.nxv4i8( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i8.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i8.nxv8i8( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i8.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv16i8.nxv16i8( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv16i8.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare 
@llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv32i8.nxv32i8( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv32i8.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i16.nxv1i16( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i16.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i16.nxv2i16( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i16.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i16.nxv4i16( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i16.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( + , + , + , + , + 
i64); + +define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i16.nxv8i16( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i16.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv16i16.nxv16i16( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv16i16.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i32.nxv1i32( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i32.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i32.nxv2i32( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i32.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define 
@intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i32.nxv4i32( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i32.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i32.nxv8i32( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i32.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i64.nxv1i64( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i64.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i64.nxv2i64( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i64.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, 
%2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i64.nxv4i64( + , + , + , + i64); + +define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i64.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + 
%a = call @llvm.riscv.vmadd.mask.nxv4i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv16i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv16i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv16i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv16i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv32i8.i8( + , + i8, + , + i64); + +define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv32i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv32i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv32i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, 
i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv16i16.i16( + , + i16, + , + i64); + +define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv16i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv16i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv16i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i32.i32( + , + i32, + , + i64); + +define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadd.vx 
{{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i32.i32( + , + i32, + , + i64); + +define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i32.i32( + , + i32, + , + i64); + +define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv8i32.i32( + , + i32, + , + i64); + +define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv8i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv8i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv8i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv1i64.i64( + , + i64, + , + i64); + +define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv1i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv1i64.i64( + , 
+ i64, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv1i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv2i64.i64( + , + i64, + , + i64); + +define @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv2i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv2i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv2i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmadd.nxv4i64.i64( + , + i64, + , + i64); + +define @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vmadd.nxv4i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadd.mask.nxv4i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmadd.mask.nxv4i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll @@ -0,0 +1,1261 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vnmsac.nxv1i8.nxv1i8( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i8.nxv2i8( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: 
vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i8.nxv4i8( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i8.nxv8i8( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv16i8.nxv16i8( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv32i8.nxv32i8( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vnmsac.nxv32i8.nxv32i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i16.nxv1i16( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i16.nxv2i16( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i16.nxv4i16( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i16.nxv8i16( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vnmsac.nxv8i16.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv16i16.nxv16i16( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i32.nxv1i32( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i32.nxv2i32( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i32.nxv4i32( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = 
call @llvm.riscv.vnmsac.nxv4i32.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i32.nxv8i32( + , + , + , + i32); + +define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i8.i8( + , + i8, + , + , + 
i32); + +define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv16i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv16i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv16i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv16i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv32i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv32i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv32i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv32i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv16i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv16i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv16i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vnmsac.mask.nxv16i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i32.i32( + , + i32, + , + i32); + +define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i32.i32( + , + i32, + , + i32); + +define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i32.i32( + , + i32, + , + i32); + +define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i32.i32( + , + i32, + , + i32); + +define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll @@ -0,0 +1,1513 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vnmsac.nxv1i8.nxv1i8( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i8.nxv2i8( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i8.nxv4i8( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i8.nxv8i8( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv16i8.nxv16i8( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv32i8.nxv32i8( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i16.nxv1i16( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i16.nxv2i16( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i16.nxv4i16( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i16.nxv8i16( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv16i16.nxv16i16( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i32.nxv1i32( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i32.nxv2i32( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i32.nxv4i32( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i32.nxv8i32( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i64.nxv1i64( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i64.nxv2i64( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i64.nxv4i64( + , + , + , + i64); + +define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: 
vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv16i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv16i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv16i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv16i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv32i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv32i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv32i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv32i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i16.i16( + , + i16, + , + i64); + 
+define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i16.i16( + , + i16, + , + i64); + +define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i16.i16( + , + i16, + , + i64); + +define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i16.i16( + , + i16, + , + i64); + +define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv16i16.i16( + , + i16, + , + i64); + +define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv16i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv16i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv16i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i32.i32( + , + i32, + , + i64); + +define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i32.i32( + , + i32, + , + i64); + +define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i32.i32( + , + i32, + , + i64); + +define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv8i32.i32( + , + i32, + , + i64); + +define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsac.vx 
{{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv8i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv8i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv8i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv1i64.i64( + , + i64, + , + i64); + +define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv1i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv1i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv1i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv2i64.i64( + , + i64, + , + i64); + +define @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv2i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv2i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv2i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsac.nxv4i64.i64( + , + i64, + , + i64); + +define @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsac.nxv4i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsac.mask.nxv4i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsac.mask.nxv4i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll @@ -0,0 +1,1261 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vnmsub.nxv1i8.nxv1i8( + , + , + , + i32); + +define 
@intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i8.nxv2i8( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i8.nxv4i8( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i8.nxv8i8( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv16i8.nxv16i8( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv32i8.nxv32i8( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i16.nxv1i16( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i16.nxv2i16( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i16.nxv4i16( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i16.nxv8i16( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv16i16.nxv16i16( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i32.nxv1i32( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i32.nxv2i32( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i32.nxv4i32( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i32.nxv8i32( + , + , + , + i32); + +define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv16i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv16i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv16i8.i8( + , + i8, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv16i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv32i8.i8( + , + i8, + , + i32); + +define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv32i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv32i8.i8( + , + i8, + 
, + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv32i8.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv16i16.i16( + , + i16, + , + i32); + +define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv16i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv16i16.i16( + , + i16, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv16i16.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i32.i32( + , + i32, + , + i32); + +define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i32.i32( + , + i32, + , + i32); + +define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i32.i32( + , + i32, + , + i32); + +define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; 
CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i32.i32( + , + i32, + , + i32); + +define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i32.i32( + , + i32, + , + , + i32); + +define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i32.i32( + %0, + i32 %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll @@ -0,0 +1,1513 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vnmsub.nxv1i8.nxv1i8( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i8.nxv2i8( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i8.nxv4i8( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + 
+define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i8.nxv8i8( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv16i8.nxv16i8( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv32i8.nxv32i8( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i16.nxv1i16( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define 
@intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i16.nxv2i16( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i16.nxv4i16( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i16.nxv8i16( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv16i16.nxv16i16( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define 
@intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i32.nxv1i32( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i32.nxv2i32( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i32.nxv4i32( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i32.nxv8i32( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define 
@intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i64.nxv1i64( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i64.nxv2i64( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i64.nxv4i64( + , + , + , + i64); + +define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, 
i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv16i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv16i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv16i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vnmsub.mask.nxv16i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv32i8.i8( + , + i8, + , + i64); + +define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv32i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv32i8.i8( + , + i8, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv32i8.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i16.i16( + , + i16, + , + i64); + +define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i16.i16( + , + i16, + , + i64); + +define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i16.i16( + , + i16, + , + i64); + +define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i16.i16( + , + i16, + 
, + i64); + +define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv16i16.i16( + , + i16, + , + i64); + +define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv16i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv16i16.i16( + , + i16, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv16i16.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i32.i32( + , + i32, + , + i64); + +define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i32.i32( + , + i32, + , + i64); + +define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i32.i32( + , + i32, + , + i64); + +define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv4i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv4i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv4i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv8i32.i32( + , + i32, + , + i64); + +define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv8i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv8i32.i32( + , + i32, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv8i32.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv1i64.i64( + , + i64, + , + i64); + +define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv1i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv1i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv1i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv2i64.i64( + , + i64, + , + i64); + +define @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}} + %a = call @llvm.riscv.vnmsub.nxv2i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vnmsub.mask.nxv2i64.i64( + , + i64, + , + , + i64); + +define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vnmsub.mask.nxv2i64.i64( + %0, + i64 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vnmsub.nxv4i64.i64( + , + i64, + , + i64); + +define @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vnmsub.vx 
{{v[0-9]+}}, a0, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
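For reference, every test in the files above follows the same shape; a minimal sketch of the unmasked vector-vector form for the smallest element type (mirroring intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8 in vnmsub-rv32.ll above, with the <vscale x 1 x i8> scalable-vector types written out in full) is:

declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
entry:
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    i32 %3)

  ret <vscale x 1 x i8> %a
}

The masked variants add a <vscale x 1 x i1> mask operand before the vector length and additionally check for the trailing v0.t on the emitted instruction.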