Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -435,6 +435,29 @@
 def AArch64smin_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_smin, AArch64smin_p>;
 def AArch64umin_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_umin, AArch64umin_p>;
 
+class fp_bin_patfrags<SDPatternOperator sdnode>
+: PatFrags<(ops node:$pg, node:$op1, node:$op2),
+           [(sdnode node:$pg, node:$op1, node:$op2),
+            (vselect node:$pg, (sdnode node:$pg, node:$op1, node:$op2), node:$op1),
+            (vselect node:$pg, (sdnode node:$pg, node:$op1, node:$op2), node:$op2)]>;
+
+class fp_unary_patfrags<SDPatternOperator sdnode>
+: PatFrags<(ops node:$pg, node:$op, node:$pt),
+           [(sdnode node:$pg, node:$op, node:$pt),
+            (vselect node:$pg, (sdnode node:$pg, node:$op, node:$pt), node:$op)]>;
+
+def AArch64fadd_p1   : fp_bin_patfrags<AArch64fadd_p>;
+def AArch64fsub_p1   : fp_bin_patfrags<AArch64fsub_p>;
+def AArch64fmul_p1   : fp_bin_patfrags<AArch64fmul_p>;
+def AArch64fmaxnm_p1 : fp_bin_patfrags<AArch64fmaxnm_p>;
+def AArch64fminnm_p1 : fp_bin_patfrags<AArch64fminnm_p>;
+def AArch64fmax_p1   : fp_bin_patfrags<AArch64fmax_p>;
+def AArch64fmin_p1   : fp_bin_patfrags<AArch64fmin_p>;
+def AArch64fabd_p1   : fp_bin_patfrags<AArch64fabd_p>;
+def AArch64fdiv_p1   : fp_bin_patfrags<AArch64fdiv_p>;
+def AArch64fneg_mt1  : fp_unary_patfrags<AArch64fneg_mt>;
+def AArch64fabs_mt1  : fp_unary_patfrags<AArch64fabs_mt>;
+
 let Predicates = [HasSVE] in {
   defm RDFFR_PPz  : sve_int_rdffr_pred<0b0, "rdffr", int_aarch64_sve_rdffr_z>;
   def  RDFFRS_PPz : sve_int_rdffr_pred<0b1, "rdffrs">;
@@ -552,8 +575,8 @@
   defm CNT_ZPmZ  : sve_int_un_pred_arit_1<   0b010, "cnt",  AArch64cnt_mt>;
   defm CNOT_ZPmZ : sve_int_un_pred_arit_1<   0b011, "cnot", AArch64cnot_mt>;
   defm NOT_ZPmZ  : sve_int_un_pred_arit_1<   0b110, "not",  AArch64not_mt>;
-  defm FABS_ZPmZ : sve_int_un_pred_arit_1_fp<0b100, "fabs", AArch64fabs_mt>;
-  defm FNEG_ZPmZ : sve_int_un_pred_arit_1_fp<0b101, "fneg", AArch64fneg_mt>;
+  defm FABS_ZPmZ : sve_int_un_pred_arit_1_fp<0b100, "fabs", AArch64fabs_mt1>;
+  defm FNEG_ZPmZ : sve_int_un_pred_arit_1_fp<0b101, "fneg", AArch64fneg_mt1>;
 
   // zext(cmpeq(x, splat(0))) -> cnot(x)
   def : Pat<(nxv16i8 (zext (nxv16i1 (AArch64setcc_z (nxv16i1 (SVEAllActive):$Pg), nxv16i8:$Op2, (SVEDup0), SETEQ)))),
@@ -625,16 +648,16 @@
   defm FDIVR_ZPmZ : sve_fp_2op_p_zds<0b1100, "fdivr", "FDIVR_ZPZZ", int_aarch64_sve_fdivr, DestructiveBinaryCommWithRev, "FDIV_ZPmZ", /*isReverseInstr*/ 1>;
   defm FDIV_ZPmZ  : sve_fp_2op_p_zds<0b1101, "fdiv",  "FDIV_ZPZZ",  int_aarch64_sve_fdiv,  DestructiveBinaryCommWithRev, "FDIVR_ZPmZ">;
 
-  defm FADD_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fadd_p>;
-  defm FSUB_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fsub_p>;
-  defm FMUL_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fmul_p>;
-  defm FMAXNM_ZPZZ : sve_fp_bin_pred_hfd<AArch64fmaxnm_p>;
-  defm FMINNM_ZPZZ : sve_fp_bin_pred_hfd<AArch64fminnm_p>;
-  defm FMAX_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fmax_p>;
-  defm FMIN_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fmin_p>;
-  defm FABD_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fabd_p>;
+  defm FADD_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fadd_p1>;
+  defm FSUB_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fsub_p1>;
+  defm FMUL_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fmul_p1>;
+  defm FMAXNM_ZPZZ : sve_fp_bin_pred_hfd<AArch64fmaxnm_p1>;
+  defm FMINNM_ZPZZ : sve_fp_bin_pred_hfd<AArch64fminnm_p1>;
+  defm FMAX_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fmax_p1>;
+  defm FMIN_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fmin_p1>;
+  defm FABD_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fabd_p1>;
   defm FMULX_ZPZZ  : sve_fp_bin_pred_hfd<int_aarch64_sve_fmulx>;
-  defm FDIV_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fdiv_p>;
+  defm FDIV_ZPZZ   : sve_fp_bin_pred_hfd<AArch64fdiv_p1>;
 } // End HasSVEorSME
 
 let Predicates = [HasSVEorSME, UseExperimentalZeroingPseudos] in {
Index: llvm/test/CodeGen/AArch64/sve-sel-instruction-undef-predicate.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-sel-instruction-undef-predicate.ll
@@ -0,0 +1,500 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define <vscale x 2 x double> @fadd_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fadd_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fadd_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fadd_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fadd_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fadd_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fadd.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fadd_rev_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fadd_rev_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %b
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fadd_rev_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fadd_rev_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %b
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fadd_rev_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fadd_rev_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fadd.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %b
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fsub_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fsub_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fsub_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fsub_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fsub_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fsub_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fsub_rev_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fsub_rev_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %b
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fsub_rev_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fsub_rev_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %b
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fsub_rev_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fsub_rev_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %b
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fmul_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fmul_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fmul_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fmul_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fmul_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmul_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fmaxnm_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fmaxnm_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fmaxnm_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fmaxnm_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fmaxnm_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmaxnm_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fminnm_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fminnm_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fminnm_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fminnm_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fminnm_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fminnm_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fmax_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fmax_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmax.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fmax_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fmax_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmax.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fmax_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmax_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fmax.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fmin_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fmin_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmin.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fmin_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fmin_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmin.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fmin_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmin_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fmin.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fabd_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fabd_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fabd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fabd.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fabd_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fabd_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fabd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fabd.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fabd_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fabd_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fabd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fabd.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fdiv_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fdiv_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fdiv z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fdiv.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fdiv_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fdiv_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fdiv.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fdiv_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fdiv_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fdiv.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fdiv_rev_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fdiv_rev_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fdiv z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fdiv.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %b
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fdiv_rev_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fdiv_rev_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fdiv.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %b
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fdiv_rev_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fdiv_rev_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fdiv.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %b
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fneg_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fneg_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fneg z0.d, p0/m, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fneg.u.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %b
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fneg_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fneg_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fneg z0.s, p0/m, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fneg.u.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %b
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fneg_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fneg_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fneg z0.h, p0/m, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fneg.u.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %b
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 2 x double> @fabs_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fabs_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fabs z0.d, p0/m, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 2 x double> @llvm.aarch64.sve.fabs.u.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
+  %sel = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %r, <vscale x 2 x double> %b
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 4 x float> @fabs_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fabs_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 4 x float> @llvm.aarch64.sve.fabs.u.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
+  %sel = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %r, <vscale x 4 x float> %b
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 8 x half> @fabs_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fabs_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %r = tail call <vscale x 8 x half> @llvm.aarch64.sve.fabs.u.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
+  %sel = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %r, <vscale x 8 x half> %b
+  ret <vscale x 8 x half> %sel
+}
+
+declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fneg.u.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fneg.u.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fneg.u.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fabs.u.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.fabs.u.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.fabs.u.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)