Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -340,6 +340,32 @@
   return N->hasOneUse();
 }]>;
 
+def AArch64SVEadd_splat_addend : PatFrag<(ops node:$src1, node:$src2),
+                                         (add node:$src1, node:$src2), [{
+  // Check whether the add's second operand is a splat; DAG canonicalization
+  // keeps constant splats on the RHS of commutative nodes.
+  return N->getOperand(1).getOpcode() == ISD::SPLAT_VECTOR;
+}]>;
+
+def AArch64mul_p_firstOpndWithSingleUse : PatFrag<(ops node:$pred, node:$src1, node:$src2),
+                                                  (AArch64mul_p node:$pred, node:$src1, node:$src2), [{
+  SDNode *Op1 = N->getOperand(1).getNode();
+
+  // Count the users of Op1.
+  // TODO: These behave like IR users rather than IR uses, so uses() is a
+  // misnomer here.
+  unsigned Op1Uses = 0;
+  for (SDNode *Use : Op1->uses()) {
+    // TODO: How do we check for other opcodes that don't actually
+    // translate into real instructions post-isel?
+    if (Use->getOpcode() == ISD::TokenFactor)
+      continue;
+    Op1Uses++;
+  }
+  return N->hasOneUse() && Op1Uses <= 1;
+}]>;
+
 def AArch64fmul_p_oneuse : PatFrag<(ops node:$pred, node:$src1, node:$src2),
                                    (AArch64fmul_p node:$pred, node:$src1, node:$src2), [{
   return N->hasOneUse();
 }]>;
@@ -388,6 +414,9 @@
                               (add node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3)),
                               // add(a, select(mask, mul(b, c), splat(0))) -> mla(a, mask, b, c)
                               (add node:$op1, (vselect node:$pred, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3), (SVEDup0)))]>;
+def AArch64mad_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
+    [(int_aarch64_sve_mad node:$pred, node:$op1, node:$op2, node:$op3),
+     // add(splat(c), mul(a, b)) -> mad(a, b, splat(c))
+     (AArch64SVEadd_splat_addend node:$op3, (AArch64mul_p_firstOpndWithSingleUse node:$pred, node:$op1, node:$op2))]>;
 def AArch64mls_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
     [(int_aarch64_sve_mls node:$pred, node:$op1, node:$op2, node:$op3),
      (sub node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3)),
@@ -467,7 +496,7 @@
   defm SQSUB_ZI  : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
   defm UQSUB_ZI  : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
 
-  defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
+  defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", AArch64mad_m1>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
   defm MLA_ZPmZZ : sve_int_mlas_vvv_pred<0b0, "mla", AArch64mla_m1>;
   defm MLS_ZPmZZ : sve_int_mlas_vvv_pred<0b1, "mls", AArch64mls_m1>;
Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -480,6 +480,14 @@
 : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3, vt4:$Op4)),
       (inst $Op1, $Op2, $Op3, $Op4)>;
 
+// Raised complexity so these patterns are preferred to the generic mul+add
+// selection.
+let AddedComplexity = 8 in {
+class SVE_4_FusedMulAddSub_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
+                                  ValueType vt2, ValueType vt3, ValueType vt4,
+                                  Instruction inst>
+: Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3, vt4:$Op4)),
+      (inst $Op1, $Op2, $Op3, $Op4)>;
+}
+
 class SVE_2_Op_Imm_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
                        ValueType vt2, Operand ImmTy, Instruction inst>
 : Pat<(vtd (op vt1:$Op1, (vt2 ImmTy:$Op2))),
@@ -3103,10 +3111,10 @@
   def _S : sve_int_mladdsub_vvv_pred<0b10, opc, asm, ZPR32>;
   def _D : sve_int_mladdsub_vvv_pred<0b11, opc, asm, ZPR64>;
 
-  def : SVE_4_Op_Pat<nxv16i8, op, nxv16i1, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
-  def : SVE_4_Op_Pat<nxv8i16, op, nxv8i1, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
-  def : SVE_4_Op_Pat<nxv4i32, op, nxv4i1, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
-  def : SVE_4_Op_Pat<nxv2i64, op, nxv2i1, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
+  def : SVE_4_FusedMulAddSub_Op_Pat<nxv16i8, op, nxv16i1, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_4_FusedMulAddSub_Op_Pat<nxv8i16, op, nxv8i1, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_4_FusedMulAddSub_Op_Pat<nxv4i32, op, nxv4i1, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_4_FusedMulAddSub_Op_Pat<nxv2i64, op, nxv2i1, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve_int_mlas_vvv_pred<bits<2> sz8_64, bits<1> opc, string asm,
Index: llvm/test/CodeGen/AArch64/sve-multiply-add-accumulate.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-multiply-add-accumulate.ll
@@ -0,0 +1,173 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 2 x i64> @muladd_i64_positiveAddend(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+; CHECK-LABEL: muladd_i64_positiveAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov z2.d, #0xffffffff
+; CHECK-NEXT:    mad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 2 x i64> %a, %b
+  %2 = add <vscale x 2 x i64> %1, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 4294967295, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  ret <vscale x 2 x i64> %2
+}
+
+define <vscale x 2 x i64> @muladd_i64_negativeAddend(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+; CHECK-LABEL: muladd_i64_negativeAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov z2.d, #0xffffffff00000001
+; CHECK-NEXT:    mad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 2 x i64> %a, %b
+  %2 = add <vscale x 2 x i64> %1, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 -4294967295, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  ret <vscale x 2 x i64> %2
+}
+
+define <vscale x 4 x i32> @muladd_i32_positiveAddend(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+; CHECK-LABEL: muladd_i32_positiveAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z2.s, #0x10000
+; CHECK-NEXT:    mad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 4 x i32> %a, %b
+  %2 = add <vscale x 4 x i32> %1, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 65536, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @muladd_i32_negativeAddend(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+; CHECK-LABEL: muladd_i32_negativeAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z2.s, #0xffff0000
+; CHECK-NEXT:    mad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 4 x i32> %a, %b
+  %2 = add <vscale x 4 x i32> %1, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -65536, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 8 x i16> @muladd_i16_positiveAddend(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_i16_positiveAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov z2.h, #255 // =0xff
+; CHECK-NEXT:    mad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 255, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  ret <vscale x 8 x i16> %2
+}
+
+define <vscale x 8 x i16> @muladd_i16_negativeAddend(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_i16_negativeAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov z2.h, #-255 // =0xffffffffffffff01
+; CHECK-NEXT:    mad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -255, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  ret <vscale x 8 x i16> %2
+}
+
+define <vscale x 16 x i8> @muladd_i8_positiveAddend(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+; CHECK-LABEL: muladd_i8_positiveAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    mov z2.b, #15 // =0xf
+; CHECK-NEXT:    mad z0.b, p0/m, z1.b, z2.b
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 16 x i8> %a, %b
+  %2 = add <vscale x 16 x i8> %1, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 15, i8 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
+  ret <vscale x 16 x i8> %2
+}
+
+define <vscale x 16 x i8> @muladd_i8_negativeAddend(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+; CHECK-LABEL: muladd_i8_negativeAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    mov z2.b, #-15 // =0xfffffffffffffff1
+; CHECK-NEXT:    mad z0.b, p0/m, z1.b, z2.b
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 16 x i8> %a, %b
+  %2 = add <vscale x 16 x i8> %1, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -15, i8 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
+  ret <vscale x 16 x i8> %2
+}
+
+; both mul operands have an additional use
+define <vscale x 8 x i16> @muladd_generic_test1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_generic_test1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    mul z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    add z2.h, z2.h, #200 // =0xc8
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    sub z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %3 = mul <vscale x 8 x i16> %2, %a
+  %4 = sub <vscale x 8 x i16> %3, %b
+  ret <vscale x 8 x i16> %4
+}
+
+; only the first mul operand has an additional use
+define <vscale x 8 x i16> @muladd_generic_test2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_generic_test2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    add z1.h, z1.h, #200 // =0xc8
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %3 = mul <vscale x 8 x i16> %2, %a
+  ret <vscale x 8 x i16> %3
+}
+
+; only the second mul operand has an additional use
+define <vscale x 8 x i16> @muladd_generic_test3(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_generic_test3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #200
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    mad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %3 = mul <vscale x 8 x i16> %2, %b
+  ret <vscale x 8 x i16> %3
+}
+
+; negative integer splat as the addend
+define <vscale x 8 x i16> @muladd_generic_test4(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_generic_test4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #-200
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    mad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  ret <vscale x 8 x i16> %2
+}
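
For reference, a minimal IR sketch (not part of the patch; the function name @mad_intrinsic is illustrative) of the intrinsic form matched by the first AArch64mad_m1 alternative above. int_aarch64_sve_mad corresponds to the existing llvm.aarch64.sve.mad intrinsic, which selected MAD_ZPmZZ both before and after this change; the new alternative extends the same selection to generic mul+add IR with a splat addend:

; mad computes %a * %b + %c per active lane; inactive lanes take %a.
define <vscale x 8 x i16> @mad_intrinsic(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
  %r = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  ret <vscale x 8 x i16> %r
}

declare <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)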