Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -467,6 +467,9 @@
   defm vfwmul : RISCVBinaryABX;
 
+  defm vfmin : RISCVBinaryAAX;
+  defm vfmax : RISCVBinaryAAX;
+
   defm vfsgnj : RISCVBinaryAAX;
   defm vfsgnjn : RISCVBinaryAAX;
   defm vfsgnjx : RISCVBinaryAAX;
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1729,6 +1729,12 @@
 defm PseudoVFWMUL : VPseudoBinaryW_VV_VX;
 
 //===----------------------------------------------------------------------===//
+// 14.9. Vector Floating-Point Min/Max Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVFMIN : VPseudoBinaryV_VV_VX;
+defm PseudoVFMAX : VPseudoBinaryV_VV_VX;
+
+//===----------------------------------------------------------------------===//
 // 14.12. Vector Floating-Point Sign-Injection Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVFSGNJ : VPseudoBinaryV_VV_VX;
@@ -2060,6 +2066,12 @@
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;
 
 //===----------------------------------------------------------------------===//
+// 14.9. Vector Floating-Point Min/Max Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>;
+
+//===----------------------------------------------------------------------===//
 // 14.12. Vector Floating-Point Sign-Injection Instructions
 //===----------------------------------------------------------------------===//
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
Index: llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
@@ -0,0 +1,881 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half>
@llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( + , + , + i32); + +define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv16f16.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( + , + , + i32); + +define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv32f16.nxv32f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( + , + , + , + , + i32); + +define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f32.nxv1f32( + , + , + i32); + +define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f32.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( + , + , + i32); + +define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f32.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + 
+define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( + , + , + i32); + +define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f32.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( + , + , + i32); + +define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f32.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( + , + , + i32); + +define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv16f32.nxv16f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( + , + , + , + , + i32); + +define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} Index: llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f16.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f16.nxv2f16( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f16.nxv2f16( + %0, + %1, + i64 
%2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f16.nxv4f16( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f16.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f16.nxv8f16( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f16.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv16f16.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv32f16.nxv32f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( + , + , + , 
+ , + i64); + +define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f32.nxv1f32( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f32.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f32.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f32.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f32.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind 
{ +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv16f32.nxv16f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f64.nxv1f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f64.nxv2f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f64.nxv4f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( + , + , + i64); + +define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f64.nxv8f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( + , + , + , + , + i64); + +define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, 
v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f32.f32( + %0, 
+ %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + 
+declare @llvm.riscv.vfmax.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmax.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmax.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} Index: 
llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f16.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f16.nxv2f16( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f16.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f16.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f16.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv16f16.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv32f16.nxv32f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f32.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f32.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f32.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f32.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( + , + , + i32); + +define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv16f32.nxv16f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( + , + , + , + , + i32); + +define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, 
{{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv16f16.f16( + %0, 
+ %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vfmin.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} Index: llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f16.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f16.nxv2f16( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f16.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f16.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f16.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv16f16.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv32f16.nxv32f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f32.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f32.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f32.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f32.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv16f32.nxv16f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f64.nxv1f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f64.nxv2f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f64.nxv4f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + 
+declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( + , + , + i64); + +define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f64.nxv8f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( + , + , + , + , + i64); + +define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8f16.f16( + , + half, + i64); + +define 
@intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, 
i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}} + %a = call @llvm.riscv.vfmin.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmin.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i64 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i64 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i64 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i64 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x double> %a
+}
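
Note (illustrative, not part of the patch): the scalable-vector types elided in the flattened test text above follow directly from the intrinsic name mangling, e.g. nxv1f16 is <vscale x 1 x half>, nxv8f64 is <vscale x 8 x double>, and the mask operand for an nxvN element type is <vscale x N x i1>. The sketch below writes one masked declaration out in full with assumed operand-role comments (merge value, vector source, scalar source, mask, VL), which reflects the usual convention for these masked RVV intrinsics rather than anything stated in the patch itself.

; Sketch: fully typed masked vfmin.vf declaration for the nxv1f64 case (RV64, so VL is i64).
declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
  <vscale x 1 x double>,   ; merge/maskedoff value (assumed role)
  <vscale x 1 x double>,   ; vector source operand
  double,                  ; scalar source operand (rs1/fs1)
  <vscale x 1 x i1>,       ; mask register (v0)
  i64);                    ; vector length (VL)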