Index: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5629,6 +5629,11 @@
     if (LegalOperations && !isOperationLegalOrCustom(ISD::FSUB, VT))
       return NegatibleCost::Expensive;
 
+    // Ignore fadd with fneg because that will be canonicalized to fsub.
+    if (Op.getOperand(0).getOpcode() == ISD::FNEG ||
+        Op.getOperand(1).getOpcode() == ISD::FNEG)
+      return NegatibleCost::Expensive;
+
     // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
     NegatibleCost V0 = getNegatibleCost(Op.getOperand(0), DAG, LegalOperations,
                                         ForCodeSize, Depth + 1);
Index: llvm/test/CodeGen/X86/fdiv.ll
===================================================================
--- llvm/test/CodeGen/X86/fdiv.ll
+++ llvm/test/CodeGen/X86/fdiv.ll
@@ -85,11 +85,11 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps %xmm0, %xmm3
 ; CHECK-NEXT:    subss %xmm1, %xmm3
+; CHECK-NEXT:    mulss %xmm2, %xmm3
 ; CHECK-NEXT:    subss %xmm0, %xmm1
-; CHECK-NEXT:    mulss %xmm2, %xmm1
-; CHECK-NEXT:    subss %xmm2, %xmm3
-; CHECK-NEXT:    divss %xmm3, %xmm1
-; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addss %xmm2, %xmm1
+; CHECK-NEXT:    divss %xmm1, %xmm3
+; CHECK-NEXT:    movaps %xmm3, %xmm0
 ; CHECK-NEXT:    retq
   %sub1 = fsub fast float %a0, %a1
   %mul2 = fmul fast float %sub1, %a2
Index: llvm/test/CodeGen/X86/neg_fp.ll
===================================================================
--- llvm/test/CodeGen/X86/neg_fp.ll
+++ llvm/test/CodeGen/X86/neg_fp.ll
@@ -54,3 +54,32 @@
   %t18 = fadd double %t16, %t7
   ret double %t18
 }
+
+; This would crash because the negated expression for %sub4
+; creates a new use of %sub1 and that alters the negated cost
+
+define float @fdiv_extra_use_changes_cost(float %a0, float %a1, float %a2) nounwind {
+; CHECK-LABEL: fdiv_extra_use_changes_cost:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movaps %xmm2, %xmm3
+; CHECK-NEXT:    subss %xmm1, %xmm3
+; CHECK-NEXT:    mulss %xmm0, %xmm3
+; CHECK-NEXT:    subss %xmm2, %xmm1
+; CHECK-NEXT:    addss %xmm0, %xmm1
+; CHECK-NEXT:    divss %xmm3, %xmm1
+; CHECK-NEXT:    movss %xmm1, (%esp)
+; CHECK-NEXT:    flds (%esp)
+; CHECK-NEXT:    popl %eax
+; CHECK-NEXT:    retl
+  %sub1 = fsub fast float %a0, %a1
+  %mul2 = fmul fast float %sub1, %a2
+  %neg = fneg fast float %a0
+  %add3 = fadd fast float %a1, %neg
+  %sub4 = fadd fast float %add3, %a2
+  %div5 = fdiv fast float %sub4, %mul2
+  ret float %div5
+}
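
Note on the TargetLowering.cpp hunk: the new early-out makes getNegatibleCost()
report an fadd that already has an fneg operand as Expensive, because (per the
added comment) such an fadd will be canonicalized to fsub, and (per the new
neg_fp.ll comment) negating %sub4 creates a new use of %sub1 and that alters
the negated cost, which is what used to crash. Below is a minimal,
self-contained C++ sketch of just that operand check; Opcode, Node, and
fnegAddendMakesNegationExpensive are illustrative stand-ins, not the real
SelectionDAG API.

  #include <array>
  #include <cassert>

  // Simplified stand-ins for SDNode opcodes and nodes (requires C++14 or
  // later for aggregate init with default member initializers).
  enum class Opcode { FAdd, FSub, FNeg, Other };

  struct Node {
    Opcode Opc = Opcode::Other;
    std::array<const Node *, 2> Ops = {nullptr, nullptr};
  };

  // Mirrors the guard added above: if either addend of the fadd is already an
  // fneg, treat negation as expensive instead of costing the usual
  // (fneg (fadd A, B)) -> (fsub (fneg A), B) fold, since the combiner will
  // canonicalize the fneg-carrying fadd into an fsub anyway.
  bool fnegAddendMakesNegationExpensive(const Node &FAdd) {
    assert(FAdd.Opc == Opcode::FAdd && "expected an fadd node");
    return FAdd.Ops[0]->Opc == Opcode::FNeg || FAdd.Ops[1]->Opc == Opcode::FNeg;
  }

  int main() {
    Node A;                                  // a plain value
    Node NegB{Opcode::FNeg, {&A, nullptr}};  // (fneg ...) feeding the fadd
    Node AddWithNeg{Opcode::FAdd, {&A, &NegB}};
    Node PlainAdd{Opcode::FAdd, {&A, &A}};

    assert(fnegAddendMakesNegationExpensive(AddWithNeg));  // bail out early
    assert(!fnegAddendMakesNegationExpensive(PlainAdd));   // normal costing
    return 0;
  }

The in-tree change performs the same check directly on the operands of the
ISD::FADD node before the per-operand negatible-cost computation.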