diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -551,6 +551,11 @@
   defm vfwmul : RISCVBinaryABX;
 
+  defm vfwmacc : RISCVTernaryWide;
+  defm vfwnmacc : RISCVTernaryWide;
+  defm vfwmsac : RISCVTernaryWide;
+  defm vfwnmsac : RISCVTernaryWide;
+
   defm vfsgnj : RISCVBinaryAAX;
   defm vfsgnjn : RISCVBinaryAAX;
   defm vfsgnjx : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -896,10 +896,11 @@
     defm _VV : VPseudoTernary<m.wvrclass, m.vrclass, m.vrclass, m, constraint>;
 }
 
-multiclass VPseudoTernaryW_VX {
+multiclass VPseudoTernaryW_VX<bit IsFloat> {
   defvar constraint = "@earlyclobber $rd";
   foreach m = MxList.m in
-    defm _VX : VPseudoTernary<m.wvrclass, GPR, m.vrclass, m, constraint>;
+    defm !if(!eq(IsFloat, 0), "_VX", "_VF") : VPseudoTernary<m.wvrclass,
+         !if(!eq(IsFloat, 0), GPR, FPR32), m.vrclass, m, constraint>;
 }
 
 multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
@@ -917,9 +918,9 @@
   defm "" : VPseudoTernaryV_VI<ImmType, Constraint>;
 }
 
-multiclass VPseudoTernaryW_VV_VX {
+multiclass VPseudoTernaryW_VV_VX<bit IsFloat = 0> {
   defm "" : VPseudoTernaryW_VV;
-  defm "" : VPseudoTernaryW_VX;
+  defm "" : VPseudoTernaryW_VX<IsFloat>;
 }
 
 multiclass VPseudoBinaryM_VV_VX_VI {
@@ -1625,10 +1626,11 @@
   foreach vtiToWti = vtilist in {
     defvar vti = vtiToWti.Vti;
     defvar wti = vtiToWti.Wti;
-    defm : VPatTernary<intrinsic, instruction, "VX",
-                       wti.Vector, XLenVT, vti.Vector, vti.Mask,
-                       vti.SEW, vti.LMul, wti.RegClass,
-                       GPR, vti.RegClass>;
+    defm : VPatTernary<intrinsic, instruction,
+                       !if(!eq(vti.Scalar, XLenVT), "VX", "VF"),
+                       wti.Vector, vti.Scalar, vti.Vector, vti.Mask,
+                       vti.SEW, vti.LMul, wti.RegClass,
+                       vti.ScalarRegClass, vti.RegClass>;
   }
 }
@@ -1858,7 +1860,7 @@
 defm PseudoVWMACCU : VPseudoTernaryW_VV_VX;
 defm PseudoVWMACC : VPseudoTernaryW_VV_VX;
 defm PseudoVWMACCSU : VPseudoTernaryW_VV_VX;
-defm PseudoVWMACCUS : VPseudoTernaryW_VX;
+defm PseudoVWMACCUS : VPseudoTernaryW_VX</*IsFloat*/0>;
 
 //===----------------------------------------------------------------------===//
 // 12.17. Vector Integer Move Instructions
@@ -1940,6 +1942,14 @@
 defm PseudoVFWMUL : VPseudoBinaryW_VV_VX;
 
 //===----------------------------------------------------------------------===//
+// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVFWMACC : VPseudoTernaryW_VV_VX</*IsFloat*/1>;
+defm PseudoVFWNMACC : VPseudoTernaryW_VV_VX</*IsFloat*/1>;
+defm PseudoVFWMSAC : VPseudoTernaryW_VV_VX</*IsFloat*/1>;
+defm PseudoVFWNMSAC : VPseudoTernaryW_VV_VX</*IsFloat*/1>;
+
+//===----------------------------------------------------------------------===//
 // 14.12. Vector Floating-Point Sign-Injection Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVFSGNJ : VPseudoBinaryV_VV_VX;
@@ -2312,6 +2322,14 @@
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;
 
 //===----------------------------------------------------------------------===//
+// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwmacc", "PseudoVFWMACC", AllWidenableFloatVectors>;
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwnmacc", "PseudoVFWNMACC", AllWidenableFloatVectors>;
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwmsac", "PseudoVFWMSAC", AllWidenableFloatVectors>;
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwnmsac", "PseudoVFWNMSAC", AllWidenableFloatVectors>;
+
+//===----------------------------------------------------------------------===//
 // 14.12. Vector Floating-Point Sign-Injection Instructions
 //===----------------------------------------------------------------------===//
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
@@ -0,0 +1,482 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
+  <vscale x 1 x float>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+  <vscale x 1 x float>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
+  <vscale x 2 x float>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
+  <vscale x 2 x float>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
+  <vscale x 4 x float>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
+  <vscale x 4 x float>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
+  <vscale x 8 x float>,
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x half> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
+  <vscale x 8 x float>,
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
+  <vscale x 16 x float>,
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x float> @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x half> %2,
+    i32 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
+  <vscale x 16 x float>,
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
+  <vscale x 1 x float>,
+  half,
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
+    <vscale x 1 x float> %0,
+    half %1,
+    <vscale x 1 x half> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
+  <vscale x 1 x float>,
+  half,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
+    <vscale x 1 x float> %0,
+    half %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
+  <vscale x 2 x float>,
+  half,
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17
+; CHECK-NEXT:    jalr
zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv2f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv2f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv4f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv4f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv4f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv8f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv8f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv8f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv16f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv16f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv16f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll @@ -0,0 +1,868 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v18, v19 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv1f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv1f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv1f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x 
ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv2f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv2f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv2f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv4f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv4f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv4f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv8f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv8f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv8f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv16f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv16f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv16f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv1f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv1f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv1f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv2f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv2f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv2f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv4f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv4f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv4f64.f32( + , + float, + , + 
, + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmacc.nxv8f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.nxv8f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmacc.mask.nxv8f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwmacc.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll @@ -0,0 +1,482 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( 
+ , + , + , + , + i32); + +define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + %3, + 
i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv1f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f32.f16( + %0, + half %1, + %2, + 
i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv16f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv16f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv16f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll @@ -0,0 +1,868 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( + 
%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( + %0, + %1, + %2, + 
i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwmsac.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv1f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v18, v0.t 
+; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv16f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv16f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv16f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv1f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; 
CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwmsac.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll @@ -0,0 +1,482 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( + 
, + , + , + i32); + +define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv1f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( + %0, + half %1, 
+ %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv16f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv16f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll @@ -0,0 +1,868 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret 
%a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv1f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv16f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv16f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( + , + half, + , + , + i64); + +define 
@intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv1f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vfwnmacc.mask.nxv4f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwnmacc.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll @@ -0,0 +1,482 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv1f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; 
CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv16f32.f16( + , + half, + , + i32); + +define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv16f32.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( + , + half, + , + , + i32); + +define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll @@ -0,0 +1,868 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( + , + , + , + i64); + +define 
@intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v18, v19 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v18, v19, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v20, v22 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v20, v22, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v8, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( + , + , + , + , + i64); + +define 
@intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vv v16, v8, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv1f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vfwnmsac.mask.nxv4f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv16f32.f16( + , + half, + , + i64); + +define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv16f32.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( + , + half, + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv1f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f64.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( + , + float, + , + , + i64); + +define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfwnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f64.f32( + , + float, + , + i64); + +define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i64 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x double> %a
+}
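
; ----------------------------------------------------------------------------
; A minimal standalone sketch of the intrinsic shape exercised by the tests
; above, spelled out for the smallest vfwmacc case. The <vscale x ...> operand
; types are inferred from the mangled suffix (nxv1f32 result, nxv1f16
; sources), and @vfwmacc_example is a hypothetical name; neither is copied
; from the patch.

declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
  <vscale x 1 x float>,  ; vd: wide (2*SEW) accumulator, also the result
  <vscale x 1 x half>,   ; vs1: narrow multiplicand
  <vscale x 1 x half>,   ; vs2: narrow multiplier
  i64);                  ; AVL (i32 in the riscv32 tests)

define <vscale x 1 x float> @vfwmacc_example(<vscale x 1 x float> %acc,
                                             <vscale x 1 x half> %x,
                                             <vscale x 1 x half> %y,
                                             i64 %vl) nounwind {
entry:
  ; Computes acc[i] + x[i] * y[i], widening the product to 2*SEW before the
  ; add. The .mask variants take an extra <vscale x 1 x i1> operand and gate
  ; lanes via v0.t on the emitted vfwmacc.vv/vfwmacc.vf.
  %r = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
    <vscale x 1 x float> %acc,
    <vscale x 1 x half> %x,
    <vscale x 1 x half> %y,
    i64 %vl)
  ret <vscale x 1 x float> %r
}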