Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -718,6 +718,16 @@
     return 1;
   case ISD::FMUL:
+    if (Options->HonorSignDependentRoundingFPMathOption &&
+        !Options->UnsafeFPMath && !Flags.hasNoNaNs()) return 0;
+
+    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
+    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
+                                    Options, Depth + 1))
+      return V;
+
+    return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
+                              Depth + 1);
   case ISD::FDIV:
     if (Options->HonorSignDependentRoundingFPMath()) return 0;
 
@@ -783,7 +793,7 @@
   case ISD::FMUL:
   case ISD::FDIV:
-    assert(!Options.HonorSignDependentRoundingFPMath());
+    assert(!Options.HonorSignDependentRoundingFPMath() || Flags.hasNoNaNs());
 
     // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
     if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
@@ -10502,12 +10512,15 @@
   if (SDValue NewSel = foldBinOpIntoSelect(N))
     return NewSel;
 
-  if (Options.UnsafeFPMath) {
+  if (Options.UnsafeFPMath ||
+      (Flags.hasNoNaNs() && Flags.hasNoSignedZeros())) {
     // fold (fmul A, 0) -> 0
     if (N1CFP && N1CFP->isZero())
       return N1;
+  }
 
-    // fmul (fmul X, C1), X2 -> fmul X, C1 * C2
+  if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
+    // fmul (fmul X, C1), C2 -> fmul X, C1 * C2
     if (N0.getOpcode() == ISD::FMUL) {
       // Fold scalars or any vector constants (not just splats).
       // This fold is done in general by InstCombine, but extra fmul insts
Index: test/CodeGen/X86/fmul-combines.ll
===================================================================
--- test/CodeGen/X86/fmul-combines.ll
+++ test/CodeGen/X86/fmul-combines.ll
@@ -92,7 +92,6 @@
 ; CHECK-LABEL: fmul_v4f32_two_consts_no_splat_reassoc:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
-; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT: retq
   %y = fmul <4 x float> %x,
   %z = fmul reassoc <4 x float> %y,
@@ -104,7 +103,6 @@
 define <4 x float> @fmul_v4f32_two_consts_no_splat_reassoc_2(<4 x float> %x) {
 ; CHECK-LABEL: fmul_v4f32_two_consts_no_splat_reassoc_2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addps %xmm0, %xmm0
 ; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT: retq
   %y = fadd <4 x float> %x, %x
Index: test/CodeGen/X86/fp-fold.ll
===================================================================
--- test/CodeGen/X86/fp-fold.ll
+++ test/CodeGen/X86/fp-fold.ll
@@ -103,16 +103,10 @@
 ; TODO: handle x*0 for fast flags the same as unsafe
 define float @fmul_zero(float %x) {
-; STRICT-LABEL: fmul_zero:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorps %xmm1, %xmm1
-; STRICT-NEXT: mulss %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: fmul_zero:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorps %xmm0, %xmm0
-; UNSAFE-NEXT: retq
+; ANY-LABEL: fmul_zero:
+; ANY: # %bb.0:
+; ANY-NEXT: xorps %xmm0, %xmm0
+; ANY-NEXT: retq
   %r = fmul nnan nsz float %x, 0.0
   ret float %r
 }
@@ -124,3 +118,13 @@
   %r = fmul float %x, 1.0
   ret float %r
 }
+
+define float @fmul_x_const_const(float %x) {
+; ANY-LABEL: fmul_x_const_const:
+; ANY: # %bb.0:
+; ANY-NEXT: mulss {{.*}}(%rip), %xmm0
+; ANY-NEXT: retq
+  %mul = fmul reassoc float %x, 9.0
+  %r = fmul reassoc float %mul, 4.0
+  ret float %r
+}