diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -338,6 +338,20 @@
                      [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 2;
   }
+  class RISCVTernaryWideNoMask
+        : Intrinsic< [llvm_anyvector_ty],
+                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
+                      llvm_anyint_ty],
+                     [IntrNoMem] >, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  class RISCVTernaryWideMask
+        : Intrinsic< [llvm_anyvector_ty],
+                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
+                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty ],
+                     [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
 
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
@@ -406,6 +420,10 @@
     def "int_riscv_" # NAME : RISCVCompareNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
   }
+  multiclass RISCVTernaryWide {
+    def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -481,6 +499,11 @@
   defm vmadd : RISCVTernaryAAXA;
   defm vnmsub : RISCVTernaryAAXA;
 
+  defm vwmaccu : RISCVTernaryWide;
+  defm vwmacc : RISCVTernaryWide;
+  defm vwmaccus : RISCVTernaryWide;
+  defm vwmaccsu : RISCVTernaryWide;
+
   defm vfadd : RISCVBinaryAAX;
   defm vfsub : RISCVBinaryAAX;
   defm vfrsub : RISCVBinaryAAX;
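For reference, the IR-level signatures these two classes instantiate, taken from the nxv1i16/nxv1i8 tests further down (the trailing comments are editorial; the vector-length operand is i32 on riscv32 and i64 on riscv64). The wide accumulator comes first and doubles as the result, and operand 2 is the multiplier that ExtendOperand = 2 lets the .vx forms pass as a scalar:

; vd[i] += a[i] * b[i], widening SEW=8 sources into a SEW=16 result
declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,  ; accumulator, same type as the result
  <vscale x 1 x i8>,   ; multiplier (a plain i8 in the .vx variant)
  <vscale x 1 x i8>,   ; multiplicand
  i32)                 ; vector length

; masked variant: the mask slots in just before the vector length
declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>,
  <vscale x 1 x i1>, i32)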
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -890,6 +890,18 @@
     defm _VX : VPseudoTernary;
 }
 
+multiclass VPseudoTernaryW_VV {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxList.m in
+    defm _VV : VPseudoTernary<m.wvrclass, m.vrclass, m.vrclass, m, constraint>;
+}
+
+multiclass VPseudoTernaryW_VX {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxList.m in
+    defm _VX : VPseudoTernary<m.wvrclass, GPR, m.vrclass, m, constraint>;
+}
+
 multiclass VPseudoTernaryV_VI {
   foreach m = MxList.m in
     defm _VI : VPseudoTernary;
@@ -905,6 +917,11 @@
   defm "" : VPseudoTernaryV_VI;
 }
 
+multiclass VPseudoTernaryW_VV_VX {
+  defm "" : VPseudoTernaryW_VV;
+  defm "" : VPseudoTernaryW_VX;
+}
+
 multiclass VPseudoBinaryM_VV_VX_VI {
   defm "" : VPseudoBinaryM_VV;
   defm "" : VPseudoBinaryM_VX;
@@ -1591,6 +1608,30 @@
                        vti.RegClass, Imm_type>;
 }
 
+multiclass VPatTernaryW_VV<string intrinsic, string instruction,
+                           list<VTypeInfoToWide> vtilist> {
+  foreach vtiToWti = vtilist in {
+    defvar vti = vtiToWti.Vti;
+    defvar wti = vtiToWti.Wti;
+    defm : VPatTernary<intrinsic, instruction, "VV",
+                       wti.Vector, vti.Vector, vti.Vector,
+                       vti.Mask, vti.SEW, vti.LMul,
+                       wti.RegClass, vti.RegClass, vti.RegClass>;
+  }
+}
+
+multiclass VPatTernaryW_VX<string intrinsic, string instruction,
+                           list<VTypeInfoToWide> vtilist> {
+  foreach vtiToWti = vtilist in {
+    defvar vti = vtiToWti.Vti;
+    defvar wti = vtiToWti.Wti;
+    defm : VPatTernary<intrinsic, instruction, "VX",
+                       wti.Vector, XLenVT, vti.Vector,
+                       vti.Mask, vti.SEW, vti.LMul,
+                       wti.RegClass, GPR, vti.RegClass>;
+  }
+}
+
 multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
                                    list<VTypeInfo> vtilist> {
   defm "" : VPatTernaryV_VV<intrinsic, instruction, vtilist>;
@@ -1611,6 +1652,12 @@
   defm "" : VPatBinaryM_VI;
 }
 
+multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
+                              list<VTypeInfoToWide> vtilist> {
+  defm "" : VPatTernaryW_VV<intrinsic, instruction, vtilist>;
+  defm "" : VPatTernaryW_VX<intrinsic, instruction, vtilist>;
+}
+
 multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
@@ -1806,6 +1853,14 @@
 defm PseudoVNMSUB : VPseudoTernaryV_VV_VX_AAXA;
 
 //===----------------------------------------------------------------------===//
+// 12.14. Vector Widening Integer Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVWMACCU : VPseudoTernaryW_VV_VX;
+defm PseudoVWMACC : VPseudoTernaryW_VV_VX;
+defm PseudoVWMACCSU : VPseudoTernaryW_VV_VX;
+defm PseudoVWMACCUS : VPseudoTernaryW_VX;
+
+//===----------------------------------------------------------------------===//
 // 12.17. Vector Integer Move Instructions
 //===----------------------------------------------------------------------===//
@@ -2165,6 +2220,14 @@
 defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
 
 //===----------------------------------------------------------------------===//
+// 12.14. Vector Widening Integer Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
+defm "" : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;
+
+//===----------------------------------------------------------------------===//
 // 12.17. Vector Integer Move Instructions
 //===----------------------------------------------------------------------===//
 foreach vti = AllVectors in {
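Before the tests, a minimal sketch of calling the masked form from IR; the function and value names here are illustrative, not part of the patch. Since the generated code uses the ta,mu policy, lanes where the mask is clear keep the value already held by the accumulator, and the @earlyclobber $rd constraint above keeps the widened destination register group from overlapping either narrow source:

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>,
  <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vwmacc_i8_to_i16_masked(<vscale x 1 x i16> %acc,
                                                   <vscale x 1 x i8> %a,
                                                   <vscale x 1 x i8> %b,
                                                   <vscale x 1 x i1> %m,
                                                   i32 %vl) {
entry:
  ; %r[i] = %acc[i] + %a[i] * %b[i] where %m[i] is set, else %acc[i]
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
      <vscale x 1 x i16> %acc, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b,
      <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x i16> %r
}

In the nxv32i16 tests below, the LMUL=8 accumulator occupies v16-v23, so the two LMUL=4 sources no longer fit in vector argument registers and are reloaded with vle8.v before the vwmacc.vv.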
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
@@ -0,0 +1,1034 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmacc.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+;
CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv1i32.nxv1i16( + , + , + , + i32); + +define @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i32.nxv2i16( + , + , + , + i32); + +define @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i32.nxv4i16( + , + , + , + i32); + +define @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i32.nxv8i16( + , + , + , + i32); + +define @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vwmacc.nxv8i32.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv16i32.nxv16i16( + , + , + , + i32); + +define @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv1i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv2i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vwmacc.mask.nxv2i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv8i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv16i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv16i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv16i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv16i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv32i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv32i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv32i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { 
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv32i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv1i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv2i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv2i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20 +; CHECK-NEXT: 
jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv8i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv16i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv16i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv16i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv16i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll @@ -0,0 +1,1412 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmacc.nxv1i16.nxv1i8( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i16.nxv2i8( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare 
@llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i16.nxv4i8( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i16.nxv8i8( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv16i16.nxv16i8( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv32i16.nxv32i8( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv1i32.nxv1i16( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i32.nxv2i16( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i32.nxv4i16( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i32.nxv8i16( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv16i32.nxv16i16( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv1i64.nxv1i32( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i64.nxv2i32( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i64.nxv4i32( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i64.nxv8i32( + , + , + , + i64); + +define @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv1i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv2i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv2i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv8i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv16i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv16i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv16i16.i8( + , + i8, + , + , + i64); + +define 
@intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv16i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv32i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv32i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv32i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv32i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv1i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv2i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv2i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv8i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv16i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv16i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv16i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv16i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv1i64.i32( + , + i32, + , + i64); + +define @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv1i64.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv1i64.i32( + , + i32, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv1i64.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv2i64.i32( + , + i32, + , + i64); + +define @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv2i64.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv2i64.i32( + , + i32, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv2i64.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv4i64.i32( + , + i32, + , + i64); + +define @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv4i64.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv4i64.i32( + , + i32, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv4i64.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmacc.nxv8i64.i32( + , + i32, + , + i64); + +define @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.nxv8i64.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmacc.mask.nxv8i64.i32( + , + i32, + , + , + i64); + +define @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmacc.mask.nxv8i64.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll @@ -0,0 +1,1034 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f 
-verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8( + , + , + , + i32); + +define @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8( + , + , + , + i32); + +define @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8( + , + , + , + i32); + +define @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccsu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8( + , + , + , + i32); + +define @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vwmaccsu.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, 
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4)
+  ret <vscale x 32 x i16> %a
+}
+
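+; Note: in the nxv32i16 cases the result takes a full m8 register group
+; (v16-v23), so the two m4 sources no longer fit in the vector argument
+; registers and appear to be passed through memory; the "vsetvli a3, zero"
+; plus vle8.v sequence reloads them at VLMAX before the requested vl is
+; re-established from a2.
+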
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(<vscale x 1 x i16>, i8, <vscale x 1 x i8>, i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(<vscale x 1 x i16>, i8, <vscale x 1 x i8>, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(<vscale x 2 x i16>, i8, <vscale x 2 x i8>, i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(<vscale x 2 x i16>, i8, <vscale x 2 x i8>, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(<vscale x 4 x i16>, i8, <vscale x 4 x i8>, i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(<vscale x 4 x i16>, i8, <vscale x 4 x i8>, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(<vscale x 8 x i16>, i8, <vscale x 8 x i8>, i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: 
intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(<vscale x 8 x i16>, i8, <vscale x 8 x i8>, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(<vscale x 16 x i16>, i8, <vscale x 16 x i8>, i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(<vscale x 16 x i16>, i8, <vscale x 16 x i8>, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(<vscale x 32 x i16>, i8, <vscale x 32 x i8>, i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(<vscale x 32 x i16>, i8, <vscale x 32 x i8>, <vscale x 32 x i1>, i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(<vscale x 1 x i32>, i16, <vscale x 1 x i16>, i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(<vscale x 1 x i32>, i16, <vscale x 1 x i16>, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(<vscale x 2 x i32>, i16, <vscale x 2 x i16>, i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(<vscale x 2 x i32>, i16, <vscale x 2 x i16>, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(<vscale x 4 x i32>, i16, <vscale x 4 x i16>, i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(<vscale x 4 x i32>, i16, <vscale x 4 x i16>, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(<vscale x 8 x i32>, i16, <vscale x 8 x i16>, i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(<vscale x 8 x i32>, i16, <vscale x 8 x i16>, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(<vscale x 16 x i32>, i16, <vscale x 16 x i16>, i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(<vscale x 16 x i32>, i16, <vscale x 16 x i16>, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4)
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
@@ -0,0 +1,1412 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, 
i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3)
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4)
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(<vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3)
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4)
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> 
@llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(<vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3)
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(<vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4)
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(<vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3)
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(<vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4)
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(<vscale x 1 x i16>, i8, <vscale x 1 x i8>, i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(<vscale x 1 x i16>, i8, <vscale x 1 x i8>, <vscale x 1 x i1>, i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(<vscale x 2 x i16>, i8, <vscale x 2 x i8>, i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(<vscale x 2 x i16>, i8, <vscale x 2 x i8>, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(<vscale x 4 x i16>, i8, <vscale x 4 x i8>, i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(<vscale x 4 x i16>, i8, <vscale x 4 x i8>, <vscale x 4 x i1>, i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(<vscale x 8 x i16>, i8, <vscale x 8 x i8>, i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(<vscale x 8 x i16>, i8, <vscale x 8 x i8>, <vscale x 8 x i1>, i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(<vscale x 16 x i16>, i8, <vscale x 16 x i8>, i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(<vscale x 16 x i16>, i8, <vscale x 16 x i8>, <vscale x 16 x i1>, i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(<vscale x 32 x i16>, i8, <vscale x 32 x i8>, i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8:
+; CHECK:       # 
%bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(<vscale x 32 x i16>, i8, <vscale x 32 x i8>, <vscale x 32 x i1>, i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(<vscale x 1 x i32>, i16, <vscale x 1 x i16>, i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(<vscale x 1 x i32>, i16, <vscale x 1 x i16>, <vscale x 1 x i1>, i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(<vscale x 2 x i32>, i16, <vscale x 2 x i16>, i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(<vscale x 2 x i32>, i16, <vscale x 2 x i16>, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(<vscale x 4 x i32>, i16, <vscale x 4 x i16>, i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(<vscale x 4 x i32>, i16, <vscale x 4 x i16>, <vscale x 4 x i1>, i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli 
a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(<vscale x 8 x i32>, i16, <vscale x 8 x i16>, i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(<vscale x 8 x i32>, i16, <vscale x 8 x i16>, <vscale x 8 x i1>, i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(<vscale x 16 x i32>, i16, <vscale x 16 x i16>, i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(<vscale x 16 x i32>, i16, <vscale x 16 x i16>, <vscale x 16 x i1>, i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(<vscale x 1 x i64>, i32, <vscale x 1 x i32>, i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3)
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(<vscale x 1 x i64>, i32, <vscale x 1 x i32>, <vscale x 1 x i1>, i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4)
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(<vscale x 2 x i64>, i32, <vscale x 2 x i32>, i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; 
CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3)
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(<vscale x 2 x i64>, i32, <vscale x 2 x i32>, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4)
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(<vscale x 4 x i64>, i32, <vscale x 4 x i32>, i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3)
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(<vscale x 4 x i64>, i32, <vscale x 4 x i32>, <vscale x 4 x i1>, i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4)
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(<vscale x 8 x i64>, i32, <vscale x 8 x i32>, i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3)
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(<vscale x 8 x i64>, i32, <vscale x 8 x i32>, <vscale x 8 x i1>, i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4)
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
@@ -0,0 +1,1034 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3)
+  ret <vscale x 1 x i16> %a
+}
+
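+; Note: these RUN lines pass --riscv-no-aliases, so the return is printed in
+; its canonical form "jalr zero, 0(ra)" rather than "ret", and the leading
+; "vsetvli a0, a0" (or "a1, a1" in the .vx tests) simply re-establishes the
+; vl argument that the intrinsic carries as its last operand.
+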
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, 
<vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i32> 
@intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccu.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4)
+  ret <vscale x 16 x i32> %a
+}
+
@llvm.riscv.vwmaccu.nxv1i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv1i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv1i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv2i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv2i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv2i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv4i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv4i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv4i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv8i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv8i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv8i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: 
vwmaccu.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv16i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv16i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv16i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv32i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv32i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv32i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv1i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv1i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv1i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv2i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vwmaccu.nxv2i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv2i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv4i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv4i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv4i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv8i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv8i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv8i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv16i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv16i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv16i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16( + %0, + i16 
%1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll @@ -0,0 +1,1412 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmaccu.nxv1i16.nxv1i8( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv2i16.nxv2i8( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv4i16.nxv4i8( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv8i16.nxv8i8( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv16i16.nxv16i8( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv32i16.nxv32i8( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv1i32.nxv1i16( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv2i32.nxv2i16( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv4i32.nxv4i16( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv8i32.nxv8i16( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv16i32.nxv16i16( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: 
vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv1i64.nxv1i32( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv2i64.nxv2i32( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv4i64.nxv4i32( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu 
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv8i64.nxv8i32( + , + , + , + i64); + +define @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv1i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv1i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv1i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv2i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv2i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv2i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv4i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv4i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv4i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv8i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv8i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv8i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv16i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv16i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv16i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8( + %0, + i8 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv32i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv32i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv32i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8( + %0, + i8 %1, + %2, + 
%3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv1i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv1i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv1i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv2i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv2i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv2i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv4i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv4i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv4i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv8i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv8i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv8i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv16i32.i16( + , + i16, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv16i32.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv16i32.i16( + , + i16, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16( + %0, + i16 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv1i64.i32( + , + i32, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv1i64.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv1i64.i32( + , + i32, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv2i64.i32( + , + i32, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv2i64.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv2i64.i32( + , + i32, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv4i64.i32( + , + i32, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv4i64.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv4i64.i32( + , + i32, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccu.nxv8i64.i32( + , + i32, + , + i64); + +define @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.nxv8i64.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccu.mask.nxv8i64.i32( + , + i32, + , + , + i64); + +define @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32( + %0, + i32 %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll @@ -0,0 +1,516 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmaccus.nxv1i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv1i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv1i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv2i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = 
call @llvm.riscv.vwmaccus.nxv2i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv2i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv4i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv4i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv4i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv8i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv8i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv8i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv16i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv16i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv16i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv32i16.i8( + , + i8, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i32 %3) 
nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv32i16.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv32i16.i8( + , + i8, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8( + %0, + i8 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv1i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv1i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv1i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv2i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv2i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv2i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv4i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv4i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv4i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv8i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv8i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv8i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmaccus.nxv16i32.i16( + , + i16, + , + i32); + +define @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv16i32.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv16i32.i16( + , + i16, + , + , + i32); + +define @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16( + %0, + i16 %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll @@ -0,0 +1,704 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmaccus.nxv1i16.i8( + , + i8, + , + i64); + +define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vwmaccus.vx v16, a0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vwmaccus.nxv1i16.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vwmaccus.mask.nxv1i16.i8( + , + i8, + , + , + i64); + +define @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf8,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
+  <vscale x 2 x i16>,
+  i8,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i8> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
+  <vscale x 2 x i16>,
+  i8,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
+  <vscale x 4 x i16>,
+  i8,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i8> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
+  <vscale x 4 x i16>,
+  i8,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
+  <vscale x 8 x i16>,
+  i8,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i8> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
+  <vscale x 8 x i16>,
+  i8,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
+  <vscale x 16 x i16>,
+  i8,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i8> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
+  <vscale x 16 x i16>,
+  i8,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
+  <vscale x 32 x i16>,
+  i8,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i8> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
+  <vscale x 32 x i16>,
+  i8,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
+  <vscale x 1 x i32>,
+  i16,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i16> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
+  <vscale x 1 x i32>,
+  i16,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
+  <vscale x 2 x i32>,
+  i16,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i16> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
+  <vscale x 2 x i32>,
+  i16,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
+  <vscale x 4 x i32>,
+  i16,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i16> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
+  <vscale x 4 x i32>,
+  i16,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
+  <vscale x 8 x i32>,
+  i16,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i16> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
+  <vscale x 8 x i32>,
+  i16,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
+  <vscale x 16 x i32>,
+  i16,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i16> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
+  <vscale x 16 x i32>,
+  i16,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
+  <vscale x 1 x i64>,
+  i32,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
+  <vscale x 1 x i64>,
+  i32,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
+  <vscale x 2 x i64>,
+  i32,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
+  <vscale x 2 x i64>,
+  i32,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
+  <vscale x 4 x i64>,
+  i32,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
+  <vscale x 4 x i64>,
+  i32,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
+  <vscale x 8 x i64>,
+  i32,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
+  <vscale x 8 x i64>,
+  i32,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vwmaccus.vx v16, a0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}