diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -626,6 +626,9 @@ // Vector signed/unsigned integer to float/double. STRICT_CVTSI2P, STRICT_CVTUI2P, + // Strict FMA nodes. + STRICT_FNMADD, STRICT_FMSUB, STRICT_FNMSUB, + // Compare and swap. LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE, LCMPXCHG8_DAG, diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2000,6 +2000,7 @@ setTargetDAGCombine(ISD::FSUB); setTargetDAGCombine(ISD::FNEG); setTargetDAGCombine(ISD::FMA); + setTargetDAGCombine(ISD::STRICT_FMA); setTargetDAGCombine(ISD::FMINNUM); setTargetDAGCombine(ISD::FMAXNUM); setTargetDAGCombine(ISD::SUB); @@ -29817,8 +29818,11 @@ case X86ISD::VPCOMU: return "X86ISD::VPCOMU"; case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2"; case X86ISD::FMSUB: return "X86ISD::FMSUB"; + case X86ISD::STRICT_FMSUB: return "X86ISD::STRICT_FMSUB"; case X86ISD::FNMADD: return "X86ISD::FNMADD"; + case X86ISD::STRICT_FNMADD: return "X86ISD::STRICT_FNMADD"; case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; + case X86ISD::STRICT_FNMSUB: return "X86ISD::STRICT_FNMSUB"; case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND"; @@ -42514,37 +42518,46 @@ if (NegMul) { switch (Opcode) { default: llvm_unreachable("Unexpected opcode"); - case ISD::FMA: Opcode = X86ISD::FNMADD; break; - case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break; - case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break; - case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break; - case X86ISD::FNMADD: Opcode = ISD::FMA; break; - case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break; - case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break; - case X86ISD::FNMSUB_RND: 
Opcode = X86ISD::FMSUB_RND; break; + case ISD::FMA: Opcode = X86ISD::FNMADD; break; + case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FNMADD; break; + case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break; + case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break; + case X86ISD::STRICT_FMSUB: Opcode = X86ISD::STRICT_FNMSUB; break; + case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break; + case X86ISD::FNMADD: Opcode = ISD::FMA; break; + case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA; break; + case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break; + case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break; + case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB; break; + case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break; } } if (NegAcc) { switch (Opcode) { default: llvm_unreachable("Unexpected opcode"); - case ISD::FMA: Opcode = X86ISD::FMSUB; break; - case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break; - case X86ISD::FMSUB: Opcode = ISD::FMA; break; - case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break; - case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break; - case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break; - case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break; - case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break; - case X86ISD::FMADDSUB: Opcode = X86ISD::FMSUBADD; break; - case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break; - case X86ISD::FMSUBADD: Opcode = X86ISD::FMADDSUB; break; - case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break; + case ISD::FMA: Opcode = X86ISD::FMSUB; break; + case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FMSUB; break; + case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break; + case X86ISD::FMSUB: Opcode = ISD::FMA; break; + case X86ISD::STRICT_FMSUB: Opcode = ISD::STRICT_FMA; break; + case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break; + case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break; + case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; 
break; + case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break; + case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break; + case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break; + case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break; + case X86ISD::FMADDSUB: Opcode = X86ISD::FMSUBADD; break; + case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break; + case X86ISD::FMSUBADD: Opcode = X86ISD::FMADDSUB; break; + case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break; } } if (NegRes) { switch (Opcode) { + // For accuracy reasons, we never combine fneg and fma under strict FP. default: llvm_unreachable("Unexpected opcode"); case ISD::FMA: Opcode = X86ISD::FNMSUB; break; case X86ISD::FMADD_RND: Opcode = X86ISD::FNMSUB_RND; break; @@ -43516,6 +43529,7 @@ const X86Subtarget &Subtarget) { SDLoc dl(N); EVT VT = N->getValueType(0); + bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode(); // Let legalize expand this if it isn't a legal type yet. const TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -43526,9 +43540,9 @@ if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA()) return SDValue(); - SDValue A = N->getOperand(0); - SDValue B = N->getOperand(1); - SDValue C = N->getOperand(2); + SDValue A = N->getOperand(IsStrict ? 1 : 0); + SDValue B = N->getOperand(IsStrict ? 2 : 1); + SDValue C = N->getOperand(IsStrict ?
3 : 2); auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) { bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize(); @@ -43566,9 +43580,15 @@ unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false); - if (N->getNumOperands() == 4) - return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3)); - return DAG.getNode(NewOpcode, dl, VT, A, B, C); + if (IsStrict) { + assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4"); + return DAG.getNode(NewOpcode, dl, {VT, MVT::Other}, + {N->getOperand(0), A, B, C}); + } else { + if (N->getNumOperands() == 4) + return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3)); + return DAG.getNode(NewOpcode, dl, VT, A, B, C); + } } // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C) @@ -46071,12 +46091,16 @@ case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget); case X86ISD::FMADD_RND: case X86ISD::FMSUB: + case X86ISD::STRICT_FMSUB: case X86ISD::FMSUB_RND: case X86ISD::FNMADD: + case X86ISD::STRICT_FNMADD: case X86ISD::FNMADD_RND: case X86ISD::FNMSUB: + case X86ISD::STRICT_FNMSUB: case X86ISD::FNMSUB_RND: - case ISD::FMA: return combineFMA(N, DAG, DCI, Subtarget); + case ISD::FMA: + case ISD::STRICT_FMA: return combineFMA(N, DAG, DCI, Subtarget); case X86ISD::FMADDSUB_RND: case X86ISD::FMSUBADD_RND: case X86ISD::FMADDSUB: diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -6487,11 +6487,11 @@ } defm VFMADD213 : avx512_fma3p_213_f<0xA8, "vfmadd213", X86any_Fmadd, X86FmaddRnd>; -defm VFMSUB213 : avx512_fma3p_213_f<0xAA, "vfmsub213", X86Fmsub, X86FmsubRnd>; +defm VFMSUB213 : avx512_fma3p_213_f<0xAA, "vfmsub213", X86any_Fmsub, X86FmsubRnd>; defm VFMADDSUB213 : avx512_fma3p_213_f<0xA6, "vfmaddsub213", X86Fmaddsub, X86FmaddsubRnd>; defm VFMSUBADD213 : avx512_fma3p_213_f<0xA7, "vfmsubadd213", X86Fmsubadd, X86FmsubaddRnd>; -defm 
VFNMADD213 : avx512_fma3p_213_f<0xAC, "vfnmadd213", X86Fnmadd, X86FnmaddRnd>; -defm VFNMSUB213 : avx512_fma3p_213_f<0xAE, "vfnmsub213", X86Fnmsub, X86FnmsubRnd>; +defm VFNMADD213 : avx512_fma3p_213_f<0xAC, "vfnmadd213", X86any_Fnmadd, X86FnmaddRnd>; +defm VFNMSUB213 : avx512_fma3p_213_f<0xAE, "vfnmsub213", X86any_Fnmsub, X86FnmsubRnd>; multiclass avx512_fma3p_231_rm opc, string OpcodeStr, SDNode OpNode, @@ -6565,11 +6565,11 @@ } defm VFMADD231 : avx512_fma3p_231_f<0xB8, "vfmadd231", X86any_Fmadd, X86FmaddRnd>; -defm VFMSUB231 : avx512_fma3p_231_f<0xBA, "vfmsub231", X86Fmsub, X86FmsubRnd>; +defm VFMSUB231 : avx512_fma3p_231_f<0xBA, "vfmsub231", X86any_Fmsub, X86FmsubRnd>; defm VFMADDSUB231 : avx512_fma3p_231_f<0xB6, "vfmaddsub231", X86Fmaddsub, X86FmaddsubRnd>; defm VFMSUBADD231 : avx512_fma3p_231_f<0xB7, "vfmsubadd231", X86Fmsubadd, X86FmsubaddRnd>; -defm VFNMADD231 : avx512_fma3p_231_f<0xBC, "vfnmadd231", X86Fnmadd, X86FnmaddRnd>; -defm VFNMSUB231 : avx512_fma3p_231_f<0xBE, "vfnmsub231", X86Fnmsub, X86FnmsubRnd>; +defm VFNMADD231 : avx512_fma3p_231_f<0xBC, "vfnmadd231", X86any_Fnmadd, X86FnmaddRnd>; +defm VFNMSUB231 : avx512_fma3p_231_f<0xBE, "vfnmsub231", X86any_Fnmsub, X86FnmsubRnd>; multiclass avx512_fma3p_132_rm opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, @@ -6645,11 +6645,11 @@ } defm VFMADD132 : avx512_fma3p_132_f<0x98, "vfmadd132", X86any_Fmadd, X86FmaddRnd>; -defm VFMSUB132 : avx512_fma3p_132_f<0x9A, "vfmsub132", X86Fmsub, X86FmsubRnd>; +defm VFMSUB132 : avx512_fma3p_132_f<0x9A, "vfmsub132", X86any_Fmsub, X86FmsubRnd>; defm VFMADDSUB132 : avx512_fma3p_132_f<0x96, "vfmaddsub132", X86Fmaddsub, X86FmaddsubRnd>; defm VFMSUBADD132 : avx512_fma3p_132_f<0x97, "vfmsubadd132", X86Fmsubadd, X86FmsubaddRnd>; -defm VFNMADD132 : avx512_fma3p_132_f<0x9C, "vfnmadd132", X86Fnmadd, X86FnmaddRnd>; -defm VFNMSUB132 : avx512_fma3p_132_f<0x9E, "vfnmsub132", X86Fnmsub, X86FnmsubRnd>; +defm VFNMADD132 : avx512_fma3p_132_f<0x9C, "vfnmadd132", X86any_Fnmadd, 
X86FnmaddRnd>; +defm VFNMSUB132 : avx512_fma3p_132_f<0x9E, "vfnmsub132", X86any_Fnmsub, X86FnmsubRnd>; // Scalar FMA multiclass avx512_fma3s_common opc, string OpcodeStr, X86VectorVTInfo _, @@ -6742,9 +6742,9 @@ } defm VFMADD : avx512_fma3s<0xA9, 0xB9, 0x99, "vfmadd", X86any_Fmadd, X86FmaddRnd>; -defm VFMSUB : avx512_fma3s<0xAB, 0xBB, 0x9B, "vfmsub", X86Fmsub, X86FmsubRnd>; -defm VFNMADD : avx512_fma3s<0xAD, 0xBD, 0x9D, "vfnmadd", X86Fnmadd, X86FnmaddRnd>; -defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86Fnmsub, X86FnmsubRnd>; +defm VFMSUB : avx512_fma3s<0xAB, 0xBB, 0x9B, "vfmsub", X86any_Fmsub, X86FmsubRnd>; +defm VFNMADD : avx512_fma3s<0xAD, 0xBD, 0x9D, "vfnmadd", X86any_Fnmadd, X86FnmaddRnd>; +defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86any_Fnmsub, X86FnmsubRnd>; multiclass avx512_scalar_fma_patterns; -defm : avx512_scalar_fma_patterns; -defm : avx512_scalar_fma_patterns; -defm : avx512_scalar_fma_patterns; defm : avx512_scalar_fma_patterns; -defm : avx512_scalar_fma_patterns; -defm : avx512_scalar_fma_patterns; -defm : avx512_scalar_fma_patterns; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/X86/X86InstrFMA.td b/llvm/lib/Target/X86/X86InstrFMA.td --- a/llvm/lib/Target/X86/X86InstrFMA.td +++ b/llvm/lib/Target/X86/X86InstrFMA.td @@ -126,7 +126,7 @@ loadv4f32, loadv8f32, X86any_Fmadd, v4f32, v8f32, SchedWriteFMA>; defm VFMSUB : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", "PS", - loadv4f32, loadv8f32, X86Fmsub, v4f32, v8f32, + loadv4f32, loadv8f32, X86any_Fmsub, v4f32, v8f32, SchedWriteFMA>; defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", "PS", loadv4f32, loadv8f32, X86Fmaddsub, v4f32, v8f32, @@ -141,7 +141,7 @@ loadv2f64, loadv4f64, X86any_Fmadd, v2f64, v4f64, SchedWriteFMA>, VEX_W; defm VFMSUB : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", "PD", - loadv2f64, loadv4f64, X86Fmsub, v2f64, + loadv2f64, loadv4f64, X86any_Fmsub, v2f64, v4f64, SchedWriteFMA>, 
VEX_W; defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", "PD", loadv2f64, loadv4f64, X86Fmaddsub, @@ -154,15 +154,15 @@ // Fused Negative Multiply-Add let ExeDomain = SSEPackedSingle in { defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", "PS", loadv4f32, - loadv8f32, X86Fnmadd, v4f32, v8f32, SchedWriteFMA>; + loadv8f32, X86any_Fnmadd, v4f32, v8f32, SchedWriteFMA>; defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", "PS", loadv4f32, - loadv8f32, X86Fnmsub, v4f32, v8f32, SchedWriteFMA>; + loadv8f32, X86any_Fnmsub, v4f32, v8f32, SchedWriteFMA>; } let ExeDomain = SSEPackedDouble in { defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", "PD", loadv2f64, - loadv4f64, X86Fnmadd, v2f64, v4f64, SchedWriteFMA>, VEX_W; + loadv4f64, X86any_Fnmadd, v2f64, v4f64, SchedWriteFMA>, VEX_W; defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", "PD", loadv2f64, - loadv4f64, X86Fnmsub, v2f64, v4f64, SchedWriteFMA>, VEX_W; + loadv4f64, X86any_Fnmsub, v2f64, v4f64, SchedWriteFMA>, VEX_W; } // All source register operands of FMA opcodes defined in fma3s_rm multiclass @@ -321,12 +321,12 @@ defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", X86any_Fmadd, SchedWriteFMA.Scl>, VEX_LIG; -defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", X86Fmsub, +defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", X86any_Fmsub, SchedWriteFMA.Scl>, VEX_LIG; -defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", X86Fnmadd, +defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", X86any_Fnmadd, SchedWriteFMA.Scl>, VEX_LIG; -defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", X86Fnmsub, +defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", X86any_Fnmsub, SchedWriteFMA.Scl>, VEX_LIG; multiclass scalar_fma_patterns; -defm : scalar_fma_patterns; -defm : scalar_fma_patterns; -defm : scalar_fma_patterns; +defm : scalar_fma_patterns; +defm : scalar_fma_patterns; +defm : scalar_fma_patterns; defm : scalar_fma_patterns; -defm : scalar_fma_patterns; -defm : scalar_fma_patterns; -defm : 
scalar_fma_patterns; +defm : scalar_fma_patterns; +defm : scalar_fma_patterns; +defm : scalar_fma_patterns; //===----------------------------------------------------------------------===// // FMA4 - AMD 4 operand Fused Multiply-Add instructions @@ -542,26 +542,26 @@ SchedWriteFMA.Scl>, fma4s_int<0x6A, "vfmaddss", ssmem, v4f32, SchedWriteFMA.Scl>; - defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86Fmsub, loadf32, + defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86any_Fmsub, loadf32, SchedWriteFMA.Scl>, fma4s_int<0x6E, "vfmsubss", ssmem, v4f32, SchedWriteFMA.Scl>; defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32, - X86Fnmadd, loadf32, SchedWriteFMA.Scl>, + X86any_Fnmadd, loadf32, SchedWriteFMA.Scl>, fma4s_int<0x7A, "vfnmaddss", ssmem, v4f32, SchedWriteFMA.Scl>; defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32, - X86Fnmsub, loadf32, SchedWriteFMA.Scl>, + X86any_Fnmsub, loadf32, SchedWriteFMA.Scl>, fma4s_int<0x7E, "vfnmsubss", ssmem, v4f32, SchedWriteFMA.Scl>; // Packed Instructions defm VFMADDPS4 : fma4p<0x68, "vfmaddps", X86any_Fmadd, v4f32, v8f32, loadv4f32, loadv8f32, SchedWriteFMA>; - defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32, + defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", X86any_Fmsub, v4f32, v8f32, loadv4f32, loadv8f32, SchedWriteFMA>; - defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32, + defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86any_Fnmadd, v4f32, v8f32, loadv4f32, loadv8f32, SchedWriteFMA>; - defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32, + defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86any_Fnmsub, v4f32, v8f32, loadv4f32, loadv8f32, SchedWriteFMA>; defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32, loadv4f32, loadv8f32, SchedWriteFMA>; @@ -575,26 +575,26 @@ SchedWriteFMA.Scl>, fma4s_int<0x6B, "vfmaddsd", sdmem, v2f64, SchedWriteFMA.Scl>; - defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86Fmsub, loadf64, + defm 
VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86any_Fmsub, loadf64, SchedWriteFMA.Scl>, fma4s_int<0x6F, "vfmsubsd", sdmem, v2f64, SchedWriteFMA.Scl>; defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64, - X86Fnmadd, loadf64, SchedWriteFMA.Scl>, + X86any_Fnmadd, loadf64, SchedWriteFMA.Scl>, fma4s_int<0x7B, "vfnmaddsd", sdmem, v2f64, SchedWriteFMA.Scl>; defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64, - X86Fnmsub, loadf64, SchedWriteFMA.Scl>, + X86any_Fnmsub, loadf64, SchedWriteFMA.Scl>, fma4s_int<0x7F, "vfnmsubsd", sdmem, v2f64, SchedWriteFMA.Scl>; // Packed Instructions defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86any_Fmadd, v2f64, v4f64, loadv2f64, loadv4f64, SchedWriteFMA>; - defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64, + defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86any_Fmsub, v2f64, v4f64, loadv2f64, loadv4f64, SchedWriteFMA>; - defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64, + defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86any_Fnmadd, v2f64, v4f64, loadv2f64, loadv4f64, SchedWriteFMA>; - defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64, + defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86any_Fnmsub, v2f64, v4f64, loadv2f64, loadv4f64, SchedWriteFMA>; defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64, loadv2f64, loadv4f64, SchedWriteFMA>; @@ -630,11 +630,11 @@ } defm : scalar_fma4_patterns; -defm : scalar_fma4_patterns; -defm : scalar_fma4_patterns; -defm : scalar_fma4_patterns; +defm : scalar_fma4_patterns; +defm : scalar_fma4_patterns; +defm : scalar_fma4_patterns; defm : scalar_fma4_patterns; -defm : scalar_fma4_patterns; -defm : scalar_fma4_patterns; -defm : scalar_fma4_patterns; +defm : scalar_fma4_patterns; +defm : scalar_fma4_patterns; +defm : scalar_fma4_patterns; diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td --- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td +++ 
b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -535,8 +535,20 @@ [(X86strict_Fmadd node:$src1, node:$src2, node:$src3), (X86Fmadd node:$src1, node:$src2, node:$src3)]>; def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFPTernaryOp, [SDNPCommutative]>; +def X86strict_Fnmadd : SDNode<"X86ISD::STRICT_FNMADD", SDTFPTernaryOp, [SDNPCommutative, SDNPHasChain]>; +def X86any_Fnmadd : PatFrags<(ops node:$src1, node:$src2, node:$src3), + [(X86strict_Fnmadd node:$src1, node:$src2, node:$src3), + (X86Fnmadd node:$src1, node:$src2, node:$src3)]>; def X86Fmsub : SDNode<"X86ISD::FMSUB", SDTFPTernaryOp, [SDNPCommutative]>; +def X86strict_Fmsub : SDNode<"X86ISD::STRICT_FMSUB", SDTFPTernaryOp, [SDNPCommutative, SDNPHasChain]>; +def X86any_Fmsub : PatFrags<(ops node:$src1, node:$src2, node:$src3), + [(X86strict_Fmsub node:$src1, node:$src2, node:$src3), + (X86Fmsub node:$src1, node:$src2, node:$src3)]>; def X86Fnmsub : SDNode<"X86ISD::FNMSUB", SDTFPTernaryOp, [SDNPCommutative]>; +def X86strict_Fnmsub : SDNode<"X86ISD::STRICT_FNMSUB", SDTFPTernaryOp, [SDNPCommutative, SDNPHasChain]>; +def X86any_Fnmsub : PatFrags<(ops node:$src1, node:$src2, node:$src3), + [(X86strict_Fnmsub node:$src1, node:$src2, node:$src3), + (X86Fnmsub node:$src1, node:$src2, node:$src3)]>; def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFPTernaryOp, [SDNPCommutative]>; def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFPTernaryOp, [SDNPCommutative]>; diff --git a/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll b/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll --- a/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll +++ b/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll @@ -1,7 +1,326 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefixes=COMMON,NOFMA -; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s --check-prefixes=COMMON,FMA -; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s 
--check-prefixes=COMMON,FMA +; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s --check-prefixes=COMMON,FMA,FMA-AVX1 +; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma4 < %s | FileCheck %s --check-prefixes=COMMON,FMA4 +; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefixes=COMMON,FMA,FMA-AVX512 + +define float @f1(float %0, float %1, float %2) #0 { +; NOFMA-LABEL: f1: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: xorps {{.*}}(%rip), %xmm0 +; NOFMA-NEXT: callq fmaf +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-LABEL: f1: +; FMA: # %bb.0: # %entry +; FMA-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 +; FMA-NEXT: retq +; +; FMA4-LABEL: f1: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: retq +entry: + %3 = fneg float %0 + %result = call float @llvm.experimental.constrained.fma.f32(float %3, float %1, float %2, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret float %result +} + +define double @f2(double %0, double %1, double %2) #0 { +; NOFMA-LABEL: f2: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: xorps {{.*}}(%rip), %xmm0 +; NOFMA-NEXT: callq fma +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-LABEL: f2: +; FMA: # %bb.0: # %entry +; FMA-NEXT: vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 +; FMA-NEXT: retq +; +; FMA4-LABEL: f2: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: retq +entry: + %3 = fneg double %0 + %result = call double @llvm.experimental.constrained.fma.f64(double %3, double %1, double %2, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret double %result +} + +define float @f3(float %0, float %1, float %2) #0 { +; NOFMA-LABEL: f3: +; NOFMA: # 
%bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: xorps {{.*}}(%rip), %xmm2 +; NOFMA-NEXT: callq fmaf +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-LABEL: f3: +; FMA: # %bb.0: # %entry +; FMA-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 +; FMA-NEXT: retq +; +; FMA4-LABEL: f3: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfmsubss %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: retq +entry: + %3 = fneg float %2 + %result = call float @llvm.experimental.constrained.fma.f32(float %0, float %1, float %3, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret float %result +} + +define double @f4(double %0, double %1, double %2) #0 { +; NOFMA-LABEL: f4: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: xorps {{.*}}(%rip), %xmm2 +; NOFMA-NEXT: callq fma +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-LABEL: f4: +; FMA: # %bb.0: # %entry +; FMA-NEXT: vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 +; FMA-NEXT: retq +; +; FMA4-LABEL: f4: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfmsubsd %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: retq +entry: + %3 = fneg double %2 + %result = call double @llvm.experimental.constrained.fma.f64(double %0, double %1, double %3, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret double %result +} + +define float @f5(float %0, float %1, float %2) #0 { +; NOFMA-LABEL: f5: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: movaps {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] +; NOFMA-NEXT: xorps %xmm3, %xmm0 +; NOFMA-NEXT: xorps %xmm3, %xmm2 +; NOFMA-NEXT: callq fmaf +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-LABEL: f5: +; FMA: # %bb.0: # %entry +; FMA-NEXT: vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 +; FMA-NEXT: 
retq +; +; FMA4-LABEL: f5: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: retq +entry: + %3 = fneg float %0 + %4 = fneg float %2 + %result = call float @llvm.experimental.constrained.fma.f32(float %3, float %1, float %4, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret float %result +} + +define double @f6(double %0, double %1, double %2) #0 { +; NOFMA-LABEL: f6: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: movaps {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0] +; NOFMA-NEXT: xorps %xmm3, %xmm0 +; NOFMA-NEXT: xorps %xmm3, %xmm2 +; NOFMA-NEXT: callq fma +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-LABEL: f6: +; FMA: # %bb.0: # %entry +; FMA-NEXT: vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 +; FMA-NEXT: retq +; +; FMA4-LABEL: f6: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: retq +entry: + %3 = fneg double %0 + %4 = fneg double %2 + %result = call double @llvm.experimental.constrained.fma.f64(double %3, double %1, double %4, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret double %result +} + +define float @f7(float %0, float %1, float %2) #0 { +; NOFMA-LABEL: f7: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: callq fmaf +; NOFMA-NEXT: xorps {{.*}}(%rip), %xmm0 +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-AVX1-LABEL: f7: +; FMA-AVX1: # %bb.0: # %entry +; FMA-AVX1-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 +; FMA-AVX1-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0 +; FMA-AVX1-NEXT: retq +; +; FMA4-LABEL: f7: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0 +; FMA4-NEXT: retq +; +; FMA-AVX512-LABEL: f7: +; FMA-AVX512: # %bb.0: # %entry +; FMA-AVX512-NEXT: 
vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 +; FMA-AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] +; FMA-AVX512-NEXT: vxorps %xmm1, %xmm0, %xmm0 +; FMA-AVX512-NEXT: retq +entry: + %3 = call float @llvm.experimental.constrained.fma.f32(float %0, float %1, float %2, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + %result = fneg float %3 + ret float %result +} + +define double @f8(double %0, double %1, double %2) #0 { +; NOFMA-LABEL: f8: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: callq fma +; NOFMA-NEXT: xorps {{.*}}(%rip), %xmm0 +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-LABEL: f8: +; FMA: # %bb.0: # %entry +; FMA-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 +; FMA-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0 +; FMA-NEXT: retq +; +; FMA4-LABEL: f8: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0 +; FMA4-NEXT: retq +entry: + %3 = call double @llvm.experimental.constrained.fma.f64(double %0, double %1, double %2, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + %result = fneg double %3 + ret double %result +} + +define float @f9(float %0, float %1, float %2) #0 { +; NOFMA-LABEL: f9: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: movaps {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] +; NOFMA-NEXT: xorps %xmm3, %xmm0 +; NOFMA-NEXT: xorps %xmm3, %xmm2 +; NOFMA-NEXT: callq fmaf +; NOFMA-NEXT: xorps {{.*}}(%rip), %xmm0 +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-AVX1-LABEL: f9: +; FMA-AVX1: # %bb.0: # %entry +; FMA-AVX1-NEXT: vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 +; FMA-AVX1-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0 +; FMA-AVX1-NEXT: retq +; +; FMA4-LABEL: f9: +; FMA4: # %bb.0: # %entry +; 
FMA4-NEXT: vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0 +; FMA4-NEXT: retq +; +; FMA-AVX512-LABEL: f9: +; FMA-AVX512: # %bb.0: # %entry +; FMA-AVX512-NEXT: vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 +; FMA-AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] +; FMA-AVX512-NEXT: vxorps %xmm1, %xmm0, %xmm0 +; FMA-AVX512-NEXT: retq +entry: + %3 = fneg float %0 + %4 = fneg float %2 + %5 = call float @llvm.experimental.constrained.fma.f32(float %3, float %1, float %4, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + %result = fneg float %5 + ret float %result +} + +define double @f10(double %0, double %1, double %2) #0 { +; NOFMA-LABEL: f10: +; NOFMA: # %bb.0: # %entry +; NOFMA-NEXT: pushq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 16 +; NOFMA-NEXT: movaps {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0] +; NOFMA-NEXT: xorps %xmm3, %xmm0 +; NOFMA-NEXT: xorps %xmm3, %xmm2 +; NOFMA-NEXT: callq fma +; NOFMA-NEXT: xorps {{.*}}(%rip), %xmm0 +; NOFMA-NEXT: popq %rax +; NOFMA-NEXT: .cfi_def_cfa_offset 8 +; NOFMA-NEXT: retq +; +; FMA-LABEL: f10: +; FMA: # %bb.0: # %entry +; FMA-NEXT: vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 +; FMA-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0 +; FMA-NEXT: retq +; +; FMA4-LABEL: f10: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0 +; FMA4-NEXT: retq +entry: + %3 = fneg double %0 + %4 = fneg double %2 + %5 = call double @llvm.experimental.constrained.fma.f64(double %3, double %1, double %4, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + %result = fneg double %5 + ret double %result +} ; Verify that fma(3.5) isn't simplified when the rounding mode is ; unknown. 
@@ -23,6 +342,12 @@ ; FMA-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; FMA-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0 ; FMA-NEXT: retq +; +; FMA4-LABEL: f17: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; FMA4-NEXT: vfmaddss %xmm0, %xmm0, %xmm0, %xmm0 +; FMA4-NEXT: retq entry: %result = call float @llvm.experimental.constrained.fma.f32( float 3.5, @@ -53,6 +378,12 @@ ; FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; FMA-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0 ; FMA-NEXT: retq +; +; FMA4-LABEL: f18: +; FMA4: # %bb.0: # %entry +; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; FMA4-NEXT: vfmaddsd %xmm0, %xmm0, %xmm0, %xmm0 +; FMA4-NEXT: retq entry: %result = call double @llvm.experimental.constrained.fma.f64( double 42.1,