Index: llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -125,12 +125,12 @@
   getActionDefinitionsBuilder({G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO})
       .legalFor({{s32, s1}, {s64, s1}});
 
-  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMA, G_FMUL, G_FDIV, G_FNEG})
+  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG})
       .legalFor({s32, s64, v2s64, v4s32, v2s32});
 
   getActionDefinitionsBuilder(G_FREM).libcallFor({s32, s64});
 
-  getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR})
+  getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FMA})
     // If we don't have full FP16 support, then scalarize the elements of
     // vectors containing fp16 types.
     .fewerElementsIf(
Index: llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -389,6 +389,7 @@
   case TargetOpcode::G_FADD:
   case TargetOpcode::G_FSUB:
   case TargetOpcode::G_FMUL:
+  case TargetOpcode::G_FMA:
   case TargetOpcode::G_FDIV:
   case TargetOpcode::G_FCONSTANT:
   case TargetOpcode::G_FPEXT:
Index: llvm/test/CodeGen/AArch64/GlobalISel/legalize-fma.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/GlobalISel/legalize-fma.mir
@@ -0,0 +1,274 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=legalizer -simplify-mir -verify-machineinstrs -mtriple aarch64-unknown-unknown %s -o - | FileCheck %s
+
+...
+---
+name: test_f16.fma
+alignment: 2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $h0, $h1, $h2
+
+    ; CHECK-LABEL: name: test_f16.fma
+    ; CHECK: liveins: $h0, $h1, $h2
+    ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY $h1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s16) = COPY $h2
+    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[COPY]](s16)
+    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[COPY1]](s16)
+    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[COPY2]](s16)
+    ; CHECK: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
+    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
+    ; CHECK: $h0 = COPY [[FPTRUNC]](s16)
+    ; CHECK: RET_ReallyLR implicit $h0
+    %0:_(s16) = COPY $h0
+    %1:_(s16) = COPY $h1
+    %2:_(s16) = COPY $h2
+    %3:_(s16) = G_FMA %0, %1, %2
+    $h0 = COPY %3(s16)
+    RET_ReallyLR implicit $h0
+
+...
+---
+name: test_f32.fma
+alignment: 2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $s0, $s1, $s2
+
+    ; CHECK-LABEL: name: test_f32.fma
+    ; CHECK: liveins: $s0, $s1, $s2
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $s1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s2
+    ; CHECK: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $s0 = COPY [[FMA]](s32)
+    ; CHECK: RET_ReallyLR implicit $s0
+    %0:_(s32) = COPY $s0
+    %1:_(s32) = COPY $s1
+    %2:_(s32) = COPY $s2
+    %3:_(s32) = G_FMA %0, %1, %2
+    $s0 = COPY %3(s32)
+    RET_ReallyLR implicit $s0
+
+...
+---
+name: test_f64.fma
+alignment: 2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $d0, $d1, $d2
+
+    ; CHECK-LABEL: name: test_f64.fma
+    ; CHECK: liveins: $d0, $d1, $d2
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $d1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $d2
+    ; CHECK: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $d0 = COPY [[FMA]](s64)
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:_(s64) = COPY $d0
+    %1:_(s64) = COPY $d1
+    %2:_(s64) = COPY $d2
+    %3:_(s64) = G_FMA %0, %1, %2
+    $d0 = COPY %3(s64)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: test_v4f32.fma
+alignment: 2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: test_v4f32.fma
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $q2
+    ; CHECK: [[FMA:%[0-9]+]]:_(<4 x s32>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $q0 = COPY [[FMA]](<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s32>) = COPY $q2
+    %3:_(<4 x s32>) = G_FMA %0, %1, %2
+    $q0 = COPY %3(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_v2f64.fma
+alignment: 2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: test_v2f64.fma
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
+    ; CHECK: [[FMA:%[0-9]+]]:_(<2 x s64>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $q0 = COPY [[FMA]](<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = COPY $q1
+    %2:_(<2 x s64>) = COPY $q2
+    %3:_(<2 x s64>) = G_FMA %0, %1, %2
+    $q0 = COPY %3(<2 x s64>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_v4f16.fma
+alignment: 2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $d0, $d1, $d2
+
+    ; CHECK-LABEL: name: test_v4f16.fma
+    ; CHECK: liveins: $d0, $d1, $d2
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $d1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $d2
+    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+    ; CHECK: [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
+    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
+    ; CHECK: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
+    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
+    ; CHECK: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+    ; CHECK: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+    ; CHECK: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV9]](s16)
+    ; CHECK: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
+    ; CHECK: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
+    ; CHECK: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+    ; CHECK: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+    ; CHECK: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV10]](s16)
+    ; CHECK: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
+    ; CHECK: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
+    ; CHECK: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+    ; CHECK: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+    ; CHECK: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[UV11]](s16)
+    ; CHECK: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FPEXT9]], [[FPEXT10]], [[FPEXT11]]
+    ; CHECK: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA3]](s32)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16)
+    ; CHECK: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:_(<4 x s16>) = COPY $d0
+    %1:_(<4 x s16>) = COPY $d1
+    %2:_(<4 x s16>) = COPY $d2
+    %3:_(<4 x s16>) = G_FMA %0, %1, %2
+    $d0 = COPY %3(<4 x s16>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: test_v2f32.fma
+alignment: 2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $d0, $d1, $d2
+
+    ; CHECK-LABEL: name: test_v2f32.fma
+    ; CHECK: liveins: $d0, $d1, $d2
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $d2
+    ; CHECK: [[FMA:%[0-9]+]]:_(<2 x s32>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $d0 = COPY [[FMA]](<2 x s32>)
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:_(<2 x s32>) = COPY $d0
+    %1:_(<2 x s32>) = COPY $d1
+    %2:_(<2 x s32>) = COPY $d2
+    %3:_(<2 x s32>) = G_FMA %0, %1, %2
+    $d0 = COPY %3(<2 x s32>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: test_v8f16.fma
+alignment: 2
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: test_v8f16.fma
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<8 x s16>) = COPY $q2
+    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
+    ; CHECK: [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<8 x s16>)
+    ; CHECK: [[UV16:%[0-9]+]]:_(s16), [[UV17:%[0-9]+]]:_(s16), [[UV18:%[0-9]+]]:_(s16), [[UV19:%[0-9]+]]:_(s16), [[UV20:%[0-9]+]]:_(s16), [[UV21:%[0-9]+]]:_(s16), [[UV22:%[0-9]+]]:_(s16), [[UV23:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY2]](<8 x s16>)
+    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
+    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV16]](s16)
+    ; CHECK: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
+    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
+    ; CHECK: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+    ; CHECK: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV9]](s16)
+    ; CHECK: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV17]](s16)
+    ; CHECK: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
+    ; CHECK: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
+    ; CHECK: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+    ; CHECK: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV10]](s16)
+    ; CHECK: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV18]](s16)
+    ; CHECK: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
+    ; CHECK: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
+    ; CHECK: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+    ; CHECK: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[UV11]](s16)
+    ; CHECK: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[UV19]](s16)
+    ; CHECK: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FPEXT9]], [[FPEXT10]], [[FPEXT11]]
+    ; CHECK: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA3]](s32)
+    ; CHECK: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+    ; CHECK: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[UV12]](s16)
+    ; CHECK: [[FPEXT14:%[0-9]+]]:_(s32) = G_FPEXT [[UV20]](s16)
+    ; CHECK: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FPEXT12]], [[FPEXT13]], [[FPEXT14]]
+    ; CHECK: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA4]](s32)
+    ; CHECK: [[FPEXT15:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+    ; CHECK: [[FPEXT16:%[0-9]+]]:_(s32) = G_FPEXT [[UV13]](s16)
+    ; CHECK: [[FPEXT17:%[0-9]+]]:_(s32) = G_FPEXT [[UV21]](s16)
+    ; CHECK: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FPEXT15]], [[FPEXT16]], [[FPEXT17]]
+    ; CHECK: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA5]](s32)
+    ; CHECK: [[FPEXT18:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+    ; CHECK: [[FPEXT19:%[0-9]+]]:_(s32) = G_FPEXT [[UV14]](s16)
+    ; CHECK: [[FPEXT20:%[0-9]+]]:_(s32) = G_FPEXT [[UV22]](s16)
+    ; CHECK: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FPEXT18]], [[FPEXT19]], [[FPEXT20]]
+    ; CHECK: [[FPTRUNC6:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA6]](s32)
+    ; CHECK: [[FPEXT21:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+    ; CHECK: [[FPEXT22:%[0-9]+]]:_(s32) = G_FPEXT [[UV15]](s16)
+    ; CHECK: [[FPEXT23:%[0-9]+]]:_(s32) = G_FPEXT [[UV23]](s16)
+    ; CHECK: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FPEXT21]], [[FPEXT22]], [[FPEXT23]]
+    ; CHECK: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA7]](s32)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16), [[FPTRUNC4]](s16), [[FPTRUNC5]](s16), [[FPTRUNC6]](s16), [[FPTRUNC7]](s16)
+    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<8 x s16>) = COPY $q0
+    %1:_(<8 x s16>) = COPY $q1
+    %2:_(<8 x s16>) = COPY $q2
+    %3:_(<8 x s16>) = G_FMA %0, %1, %2
+    $q0 = COPY %3(<8 x s16>)
+    RET_ReallyLR implicit $q0
+
+...
Index: llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-fma.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-fma.mir
@@ -0,0 +1,168 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=regbankselect -simplify-mir -verify-machineinstrs -mtriple aarch64-unknown-unknown %s -o - | FileCheck %s
+
+...
+---
+name: test_f16.fma
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $h0, $h1, $h2
+
+    ; CHECK-LABEL: name: test_f16.fma
+    ; CHECK: liveins: $h0, $h1, $h2
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s16) = COPY $h0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s16) = COPY $h1
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr(s16) = COPY $h2
+    ; CHECK: [[FPEXT:%[0-9]+]]:fpr(s32) = G_FPEXT [[COPY]](s16)
+    ; CHECK: [[FPEXT1:%[0-9]+]]:fpr(s32) = G_FPEXT [[COPY1]](s16)
+    ; CHECK: [[FPEXT2:%[0-9]+]]:fpr(s32) = G_FPEXT [[COPY2]](s16)
+    ; CHECK: [[FMA:%[0-9]+]]:fpr(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
+    ; CHECK: [[FPTRUNC:%[0-9]+]]:fpr(s16) = G_FPTRUNC [[FMA]](s32)
+    ; CHECK: $h0 = COPY [[FPTRUNC]](s16)
+    ; CHECK: RET_ReallyLR implicit $h0
+    %0:_(s16) = COPY $h0
+    %1:_(s16) = COPY $h1
+    %2:_(s16) = COPY $h2
+    %4:_(s32) = G_FPEXT %0(s16)
+    %5:_(s32) = G_FPEXT %1(s16)
+    %6:_(s32) = G_FPEXT %2(s16)
+    %7:_(s32) = G_FMA %4, %5, %6
+    %3:_(s16) = G_FPTRUNC %7(s32)
+    $h0 = COPY %3(s16)
+    RET_ReallyLR implicit $h0
+
+...
+---
+name: test_f32.fma
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $s0, $s1, $s2
+
+    ; CHECK-LABEL: name: test_f32.fma
+    ; CHECK: liveins: $s0, $s1, $s2
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s32) = COPY $s1
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr(s32) = COPY $s2
+    ; CHECK: [[FMA:%[0-9]+]]:fpr(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $s0 = COPY [[FMA]](s32)
+    ; CHECK: RET_ReallyLR implicit $s0
+    %0:_(s32) = COPY $s0
+    %1:_(s32) = COPY $s1
+    %2:_(s32) = COPY $s2
+    %3:_(s32) = G_FMA %0, %1, %2
+    $s0 = COPY %3(s32)
+    RET_ReallyLR implicit $s0
+
+...
+---
+name: test_f64.fma
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $d0, $d1, $d2
+
+    ; CHECK-LABEL: name: test_f64.fma
+    ; CHECK: liveins: $d0, $d1, $d2
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s64) = COPY $d1
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr(s64) = COPY $d2
+    ; CHECK: [[FMA:%[0-9]+]]:fpr(s64) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $d0 = COPY [[FMA]](s64)
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:_(s64) = COPY $d0
+    %1:_(s64) = COPY $d1
+    %2:_(s64) = COPY $d2
+    %3:_(s64) = G_FMA %0, %1, %2
+    $d0 = COPY %3(s64)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name: test_v4f32.fma
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: test_v4f32.fma
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr(<4 x s32>) = COPY $q1
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr(<4 x s32>) = COPY $q2
+    ; CHECK: [[FMA:%[0-9]+]]:fpr(<4 x s32>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $q0 = COPY [[FMA]](<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s32>) = COPY $q2
+    %3:_(<4 x s32>) = G_FMA %0, %1, %2
+    $q0 = COPY %3(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_v2f64.fma
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: test_v2f64.fma
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr(<2 x s64>) = COPY $q1
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr(<2 x s64>) = COPY $q2
+    ; CHECK: [[FMA:%[0-9]+]]:fpr(<2 x s64>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $q0 = COPY [[FMA]](<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = COPY $q1
+    %2:_(<2 x s64>) = COPY $q2
+    %3:_(<2 x s64>) = G_FMA %0, %1, %2
+    $q0 = COPY %3(<2 x s64>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_v2f32.fma
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $d0, $d1, $d2
+
+    ; CHECK-LABEL: name: test_v2f32.fma
+    ; CHECK: liveins: $d0, $d1, $d2
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr(<2 x s32>) = COPY $d1
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr(<2 x s32>) = COPY $d2
+    ; CHECK: [[FMA:%[0-9]+]]:fpr(<2 x s32>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK: $d0 = COPY [[FMA]](<2 x s32>)
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:_(<2 x s32>) = COPY $d0
+    %1:_(<2 x s32>) = COPY $d1
+    %2:_(<2 x s32>) = COPY $d2
+    %3:_(<2 x s32>) = G_FMA %0, %1, %2
+    $d0 = COPY %3(<2 x s32>)
+    RET_ReallyLR implicit $d0
+
+...
Index: llvm/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
===================================================================
--- llvm/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
+++ llvm/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
@@ -120,11 +120,17 @@
   %1 = call %v4f16 @llvm.log2.v4f16(%v4f16 %a)
   ret %v4f16 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v4f16.fma
 define %v4f16 @test_v4f16.fma(%v4f16 %a, %v4f16 %b, %v4f16 %c) {
   ; CHECK-LABEL: test_v4f16.fma:
   ; CHECK-NOFP16-COUNT-4: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
   ; CHECK-FP16-NOT: fcvt
   ; CHECK-FP16: fmla.4h
+  ; GISEL-LABEL: test_v4f16.fma:
+  ; GISEL-NOFP16-COUNT-4: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+  ; GISEL-FP16-NOT: fcvt
+  ; GISEL-FP16: fmla.4h
   %1 = call %v4f16 @llvm.fma.v4f16(%v4f16 %a, %v4f16 %b, %v4f16 %c)
   ret %v4f16 %1
 }
@@ -345,11 +351,17 @@
   %1 = call %v8f16 @llvm.log2.v8f16(%v8f16 %a)
   ret %v8f16 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v8f16.fma
 define %v8f16 @test_v8f16.fma(%v8f16 %a, %v8f16 %b, %v8f16 %c) {
   ; CHECK-LABEL: test_v8f16.fma:
   ; CHECK-NOFP16-COUNT-8: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
   ; CHECK-FP16-NOT: fcvt
   ; CHECK-FP16: fmla.8h
+  ; GISEL-LABEL: test_v8f16.fma:
+  ; GISEL-NOFP16-COUNT-8: fmadd s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+  ; GISEL-FP16-NOT: fcvt
+  ; GISEL-FP16: fmla.8h
   %1 = call %v8f16 @llvm.fma.v8f16(%v8f16 %a, %v8f16 %b, %v8f16 %c)
   ret %v8f16 %1
 }
@@ -550,9 +562,13 @@
   %1 = call %v2f32 @llvm.log2.v2f32(%v2f32 %a)
   ret %v2f32 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v2f32.fma
 ; CHECK-LABEL: test_v2f32.fma:
+; GISEL-LABEL: test_v2f32.fma:
 define %v2f32 @test_v2f32.fma(%v2f32 %a, %v2f32 %b, %v2f32 %c) {
   ; CHECK: fmla.2s
+  ; GISEL: fmla.2s
   %1 = call %v2f32 @llvm.fma.v2f32(%v2f32 %a, %v2f32 %b, %v2f32 %c)
   ret %v2f32 %1
 }
@@ -713,7 +729,10 @@
   %1 = call %v4f32 @llvm.log2.v4f32(%v4f32 %a)
   ret %v4f32 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v4f32.fma
 ; CHECK: test_v4f32.fma:
+; GISEL: test_v4f32.fma:
 define %v4f32 @test_v4f32.fma(%v4f32 %a, %v4f32 %b, %v4f32 %c) {
   ; CHECK: fma
   %1 = call %v4f32 @llvm.fma.v4f32(%v4f32 %a, %v4f32 %b, %v4f32 %c)
   ret %v4f32 %1
 }
@@ -876,9 +895,13 @@
   %1 = call %v2f64 @llvm.log2.v2f64(%v2f64 %a)
   ret %v2f64 %1
 }
+
+; FALLBACK-NOT: remark{{.*}}test_v2f64.fma
 ; CHECK: test_v2f64.fma:
+; GISEL: test_v2f64.fma:
 define %v2f64 @test_v2f64.fma(%v2f64 %a, %v2f64 %b, %v2f64 %c) {
   ; CHECK: fma
   ; GISEL: fma
   %1 = call %v2f64 @llvm.fma.v2f64(%v2f64 %a, %v2f64 %b, %v2f64 %c)
   ret %v2f64 %1
 }
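
For context, and not part of the patch itself: a minimal IR-level sketch of what the change enables. The file name, function name, and RUN/CHECK lines below are illustrative assumptions rather than content from the patch; the patch's own coverage is the MIR and .ll tests above. With G_FMA legalized and mapped to the FPR bank, a scalar half fma.f16 call should be handled by GlobalISel: without full FP16 it is widened through G_FPEXT/G_FMA/G_FPTRUNC (as in the test_f16.fma MIR test), which is expected to select to fcvt + fmadd + fcvt.

  ; fma-f16-example.ll (hypothetical, for illustration only)
  ; RUN: llc -mtriple=aarch64-unknown-unknown -global-isel %s -o - | FileCheck %s
  declare half @llvm.fma.f16(half, half, half)

  define half @fma_f16(half %a, half %b, half %c) {
    ; Expected lowering without +fullfp16: extend each half operand to single
    ; precision, perform a fused multiply-add, then truncate back to half.
    ; CHECK-LABEL: fma_f16:
    ; CHECK: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
    %r = call half @llvm.fma.f16(half %a, half %b, half %c)
    ret half %r
  }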