diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -165,8 +165,8 @@
 def AArch64lastb : SDNode<"AArch64ISD::LASTB", SDT_AArch64Reduce>;
 
 def SDT_AArch64Arith : SDTypeProfile<1, 3, [
-  SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
-  SDTCVecEltisVT<1,i1>, SDTCisSameAs<0,2>, SDTCisSameAs<2,3>
+  SDTCisVec<0>, SDTCVecEltisVT<1,i1>, SDTCisSameAs<0,2>,
+  SDTCisSameAs<2,3>, SDTCisSameNumEltsAs<0,1>
 ]>;
 
 def SDT_AArch64FMA : SDTypeProfile<1, 4, [
@@ -240,6 +240,10 @@
 def AArch64cnot_mt : PatFrags<(ops node:$pg, node:$op, node:$pt), [(int_aarch64_sve_cnot node:$pt, node:$pg, node:$op)]>;
 def AArch64not_mt : PatFrags<(ops node:$pg, node:$op, node:$pt), [(int_aarch64_sve_not node:$pt, node:$pg, node:$op)]>;
 
+def AArch64fmul_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_fmul, fmul>;
+def AArch64fadd_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_fadd, fadd>;
+def AArch64fsub_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_fsub, fsub>;
+
 def SDT_AArch64FCVT : SDTypeProfile<1, 3, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
   SDTCVecEltisVT<1,i1>
@@ -451,9 +455,9 @@
   defm FMIN_ZPZI : sve_fp_2op_i_p_zds_zeroing_hfd;
   }
 
-  defm FADD_ZPmZ : sve_fp_2op_p_zds<0b0000, "fadd", "FADD_ZPZZ", int_aarch64_sve_fadd, DestructiveBinaryComm>;
-  defm FSUB_ZPmZ : sve_fp_2op_p_zds<0b0001, "fsub", "FSUB_ZPZZ", int_aarch64_sve_fsub, DestructiveBinaryCommWithRev, "FSUBR_ZPmZ">;
-  defm FMUL_ZPmZ : sve_fp_2op_p_zds<0b0010, "fmul", "FMUL_ZPZZ", int_aarch64_sve_fmul, DestructiveBinaryComm>;
+  defm FADD_ZPmZ : sve_fp_2op_p_zds<0b0000, "fadd", "FADD_ZPZZ", AArch64fadd_m1, DestructiveBinaryComm>;
+  defm FSUB_ZPmZ : sve_fp_2op_p_zds<0b0001, "fsub", "FSUB_ZPZZ", AArch64fsub_m1, DestructiveBinaryCommWithRev, "FSUBR_ZPmZ">;
+  defm FMUL_ZPmZ : sve_fp_2op_p_zds<0b0010, "fmul", "FMUL_ZPZZ", AArch64fmul_m1, DestructiveBinaryComm>;
   defm FSUBR_ZPmZ : sve_fp_2op_p_zds<0b0011, "fsubr", "FSUBR_ZPZZ", int_aarch64_sve_fsubr, DestructiveBinaryCommWithRev, "FSUB_ZPmZ", /*isReverseInstr*/ 1>;
   defm FMAXNM_ZPmZ : sve_fp_2op_p_zds<0b0100, "fmaxnm", "FMAXNM_ZPZZ", int_aarch64_sve_fmaxnm, DestructiveBinaryComm>;
   defm FMINNM_ZPmZ : sve_fp_2op_p_zds<0b0101, "fminnm", "FMINNM_ZPZZ", int_aarch64_sve_fminnm, DestructiveBinaryComm>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -513,6 +513,17 @@
                 (vt (AArch64dup (it immL))))),
           (inst $Pg, $Zs1, imm)>;
 
+//===----------------------------------------------------------------------===//
+// SVE pattern match helpers.
+//===----------------------------------------------------------------------===//
+
+// Matches either an intrinsic, or a predicated operation with an all-active predicate
+class EitherVSelectOrPassthruPatFrags<SDPatternOperator intrinsic, SDPatternOperator sdnode>
+: PatFrags<(ops node:$Pg, node:$Op1, node:$Op2), [
+    (intrinsic node:$Pg, node:$Op1, node:$Op2),
+    (vselect node:$Pg, (sdnode (SVEAllActive), node:$Op1, node:$Op2), node:$Op1),
+  ]>;
+
 //
 // Pseudo -> Instruction mappings
 //
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fp-vselect.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fp-vselect.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 8 x half> @vselect_fmul_f16(<vscale x 8 x i1> %p, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: vselect_fmul_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %mul = fmul <vscale x 8 x half> %a, %b
+  %sel = select <vscale x 8 x i1> %p, <vscale x 8 x half> %mul, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 4 x float> @vselect_fmul_f32(<vscale x 4 x i1> %p, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: vselect_fmul_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %mul = fmul <vscale x 4 x float> %a, %b
+  %sel = select <vscale x 4 x i1> %p, <vscale x 4 x float> %mul, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 2 x double> @vselect_fmul_f64(<vscale x 2 x i1> %p, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: vselect_fmul_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %mul = fmul <vscale x 2 x double> %a, %b
+  %sel = select <vscale x 2 x i1> %p, <vscale x 2 x double> %mul, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 8 x half> @vselect_fadd_f16(<vscale x 8 x i1> %p, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: vselect_fadd_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %add = fadd <vscale x 8 x half> %a, %b
+  %sel = select <vscale x 8 x i1> %p, <vscale x 8 x half> %add, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 4 x float> @vselect_fadd_f32(<vscale x 4 x i1> %p, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: vselect_fadd_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %add = fadd <vscale x 4 x float> %a, %b
+  %sel = select <vscale x 4 x i1> %p, <vscale x 4 x float> %add, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 2 x double> @vselect_fadd_f64(<vscale x 2 x i1> %p, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: vselect_fadd_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %add = fadd <vscale x 2 x double> %a, %b
+  %sel = select <vscale x 2 x i1> %p, <vscale x 2 x double> %add, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
+
+define <vscale x 8 x half> @vselect_fsub_f16(<vscale x 8 x i1> %p, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: vselect_fsub_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %sub = fsub <vscale x 8 x half> %a, %b
+  %sel = select <vscale x 8 x i1> %p, <vscale x 8 x half> %sub, <vscale x 8 x half> %a
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 4 x float> @vselect_fsub_f32(<vscale x 4 x i1> %p, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: vselect_fsub_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %sub = fsub <vscale x 4 x float> %a, %b
+  %sel = select <vscale x 4 x i1> %p, <vscale x 4 x float> %sub, <vscale x 4 x float> %a
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 2 x double> @vselect_fsub_f64(<vscale x 2 x i1> %p, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: vselect_fsub_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %sub = fsub <vscale x 2 x double> %a, %b
+  %sel = select <vscale x 2 x i1> %p, <vscale x 2 x double> %sub, <vscale x 2 x double> %a
+  ret <vscale x 2 x double> %sel
+}
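Note, for context rather than as part of the patch: EitherVSelectOrPassthruPatFrags lets a single pattern cover both the explicit SVE intrinsic and an unpredicated node merged with its first operand via vselect, which is why it fits the destructive Zdn form. A minimal IR sketch of the two forms for the f32 fmul case follows; the function names @intrinsic_form and @vselect_form are made up for illustration, while the intrinsic itself is the existing llvm.aarch64.sve.fmul. With this patch, both should select the same fmul z0.s, p0/m, z0.s, z1.s.

; Intrinsic form, matched by the `intrinsic` alternative of the PatFrags:
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)

define <vscale x 4 x float> @intrinsic_form(<vscale x 4 x i1> %p, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %p, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %r
}

; Unpredicated op wrapped in a select, matched by the `vselect` alternative.
; The false lanes take %a, the first multiplicand, so the merging predicated
; instruction with %a in the destructive Zdn register gives the same result.
define <vscale x 4 x float> @vselect_form(<vscale x 4 x i1> %p, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
  %mul = fmul <vscale x 4 x float> %a, %b
  %sel = select <vscale x 4 x i1> %p, <vscale x 4 x float> %mul, <vscale x 4 x float> %a
  ret <vscale x 4 x float> %sel
}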