diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -193,4 +193,7 @@ defm vmaxu : RISCVBinaryAAX; defm vmax : RISCVBinaryAAX; + defm vfadd : RISCVBinaryAAX; + defm vfsub : RISCVBinaryAAX; + defm vfrsub : RISCVBinaryAAX; } // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -84,18 +84,22 @@ // Vector register and vector group type information. //===----------------------------------------------------------------------===// -class VTypeInfo +class VTypeInfo { ValueType Vector = Vec; ValueType Mask = Mas; int SEW = Sew; VReg RegClass = Reg; LMULInfo LMul = M; + ValueType Scalar = Scal; + RegisterClass ScalarRegClass = ScalarReg; } -class GroupVTypeInfo - : VTypeInfo +class GroupVTypeInfo + : VTypeInfo { ValueType VectorM1 = VecM1; } @@ -129,6 +133,42 @@ def VI64M4: GroupVTypeInfo; def VI64M8: GroupVTypeInfo; } + + defset list AllFloatVectors = { + defset list NoGroupFloatVectors = { + def VF16MF4: VTypeInfo; + def VF16MF2: VTypeInfo; + def VF16M1: VTypeInfo; + + def VF32MF2: VTypeInfo; + def VF32M1: VTypeInfo; + + def VF64M1: VTypeInfo; + } + + defset list GroupFloatVectors = { + def VF16M2: GroupVTypeInfo; + def VF16M4: GroupVTypeInfo; + def VF16M8: GroupVTypeInfo; + + def VF32M2: GroupVTypeInfo; + def VF32M4: GroupVTypeInfo; + def VF32M8: GroupVTypeInfo; + + def VF64M2: GroupVTypeInfo; + def VF64M4: GroupVTypeInfo; + def VF64M8: GroupVTypeInfo; + } + } } class VTypeInfoToWide @@ -320,9 +360,10 @@ defm _VV : VPseudoBinary; } -multiclass VPseudoBinaryV_VX { +multiclass VPseudoBinaryV_VX { foreach m = MxList.m in - defm _VX : VPseudoBinary; + defm !if(!eq(IsFloat, 0), "_VX", "_VF") : VPseudoBinary; } multiclass VPseudoBinaryV_VI { @@ -414,17 +455,17 @@ multiclass VPseudoBinaryV_VV_VX_VI { defm "" : VPseudoBinaryV_VV; - defm "" : VPseudoBinaryV_VX; + defm "" : VPseudoBinaryV_VX; defm "" : VPseudoBinaryV_VI; } -multiclass VPseudoBinaryV_VV_VX { +multiclass VPseudoBinaryV_VV_VX { defm "" : VPseudoBinaryV_VV; - defm "" : VPseudoBinaryV_VX; + defm "" : VPseudoBinaryV_VX; } multiclass VPseudoBinaryV_VX_VI { - defm "" : VPseudoBinaryV_VX; + defm "" : VPseudoBinaryV_VX; defm "" : VPseudoBinaryV_VI; } @@ -624,10 +665,11 @@ multiclass VPatBinaryV_VX vtilist> { foreach vti = vtilist in - defm : VPatBinary; + vti.RegClass, vti.ScalarRegClass>; } multiclass VPatBinaryV_VI; +defm PseudoVFSUB : VPseudoBinaryV_VV_VX; +defm PseudoVFRSUB : VPseudoBinaryV_VX; + +//===----------------------------------------------------------------------===// // Patterns. //===----------------------------------------------------------------------===// @@ -1083,4 +1132,11 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>; defm "" : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>; +//===----------------------------------------------------------------------===// +// 14.2. 
Vector Single-Width Floating-Point Add/Subtract Instructions +//===----------------------------------------------------------------------===// +defm "" : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>; +defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>; + } // Predicates = [HasStdExtV] diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll @@ -0,0 +1,882 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \ +; RUN: -mattr=+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f16.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f16.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f16.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv16f16.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv32f16.nxv32f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f32.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f32.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f32.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f32.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( + , + , + i32); + +define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv16f32.nxv16f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( + , + , + , + , + i32); + +define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call 
@llvm.riscv.vfadd.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vfadd.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define 
@intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll @@ -0,0 +1,1202 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \ +; RUN: -mattr=+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f16.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f16.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f16.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv16f16.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv32f16.nxv32f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f32.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f32.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f32.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f32.nxv8f32( + %0, + %1, + i64 %2) + + 
ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv16f32.nxv16f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f64.nxv2f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f64.nxv4f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define 
@intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( + , + , + i64); + +define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f64.nxv8f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( + , + , + , + , + i64); + +define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,mf2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = 
call @llvm.riscv.vfadd.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfadd.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfadd.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + 
i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll @@ -0,0 +1,442 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \ +; RUN: -mattr=+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfrsub.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half 
%2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll @@ -0,0 +1,602 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \ +; RUN: -mattr=+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfrsub.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: 
vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call 
@llvm.riscv.vfrsub.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv4f32.f32( + %0, + float 
%1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + 
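+; Note: vfrsub.vf is the reversed form of vfsub.vf: each result element is the
+; scalar (FP register) operand minus the corresponding element of the vector
+; operand, so the tests below only exercise the .vf form of the instruction.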
+declare @llvm.riscv.vfrsub.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrsub.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrsub.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll @@ -0,0 +1,882 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \ +; RUN: -mattr=+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f16.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f16.nxv2f16( + , + , + i32); + +define 
@intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f16.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f16.nxv4f16( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f16.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f16.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv16f16.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv32f16.nxv32f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f32.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f32.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f32.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f32.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16f32.nxv16f32( + , + , + i32); + +define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv16f32.nxv16f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( + , + , + , + , + i32); + +define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call 
@llvm.riscv.vfsub.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vfsub.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define 
@intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll @@ -0,0 +1,1202 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \ +; RUN: -mattr=+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f16.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f16.nxv2f16( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f16.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f16.nxv4f16( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f16.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f16.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv16f16.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv32f16.nxv32f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f32.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfsub.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f32.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f32.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f32.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16f32.nxv16f32( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv16f32.nxv16f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f64.nxv1f64( + %0, + 
%1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f64.nxv2f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f64.nxv4f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( + , + , + i64); + +define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f64.nxv8f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( + , + , + , + , + i64); + +define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define 
@intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, 
v0.t + %a = call @llvm.riscv.vfsub.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfsub.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsub.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +}
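+; Note: unlike vfrsub.vf above, vfsub.vf subtracts the scalar operand from each
+; vector element (vector - scalar); the vector-vector form is covered by the
+; vfsub.vv tests earlier in this file.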