Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1785,6 +1785,7 @@
   setTargetDAGCombine(ISD::FSUB);
   setTargetDAGCombine(ISD::FNEG);
   setTargetDAGCombine(ISD::FMA);
+  setTargetDAGCombine(ISD::FMAXNUM);
   setTargetDAGCombine(ISD::SUB);
   setTargetDAGCombine(ISD::LOAD);
   setTargetDAGCombine(ISD::MLOAD);
@@ -26405,6 +26406,63 @@
                      N->getOperand(0), N->getOperand(1));
 }
+static SDValue performFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
+                                     const X86Subtarget *Subtarget) {
+  EVT VT = N->getValueType(0);
+
+  // TODO: Check for global or instruction-level "nnan". In that case, we
+  //       should be able to lower to FMAX/FMIN alone.
+  // TODO: If an operand is already known to be a NaN or not a NaN, this
+  //       should be an optional swap and FMAX/FMIN.
+  // TODO: Allow f64, vectors, and fminnum.
+
+  if (VT != MVT::f32 || !Subtarget->hasSSE1() || Subtarget->useSoftFloat())
+    return SDValue();
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  SDLoc DL(N);
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+                                         VT);
+
+  // There are 4 possibilities involving NaN inputs, and these are the required
+  // outputs:
+  //                   Op1
+  //               Num     NaN
+  //            ----------------
+  //     Num  |  Max  |  Op0  |
+  // Op0        ----------------
+  //     NaN  |  Op1  |  NaN  |
+  //            ----------------
+  //
+  // The SSE FP max/min instructions were not designed for this case, but rather
+  // to implement:
+  //   max = op1 > op2 ? op1 : op2
+  //
+  // So they always return op2 if either input is a NaN. However, we can still
+  // use those instructions for fmaxnum by selecting away a NaN input.
+  //
+  // 1. If the first operand is a NaN, calculate the max of the second operand
+  //    against itself, so return the second operand.
+  // 2. If the second operand is a NaN, return the first operand (if it's not a
+  //    NaN too).
+  // 3. If both operands are NaN, return the second operand's NaN value because
+  //    Op1 was selected.
+  // 4. If neither operand is a NaN, calculate the max of the first operand and
+  //    the second operand.
+
+  // Is the first operand a NaN?
+  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
+
+  // If the first operand is not a NaN, then pass it through. Otherwise, choose
+  // the second operand.
+  SDValue Op0NotNan = DAG.getNode(ISD::SELECT, DL, VT, IsOp0Nan, Op1, Op0);
+
+  return DAG.getNode(X86ISD::FMAX, DL, VT, Op1, Op0NotNan);
+}
+
+
 /// Do target-specific dag combines on X86ISD::FAND nodes.
 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
@@ -27250,6 +27308,7 @@
   case X86ISD::FOR:         return PerformFORCombine(N, DAG, Subtarget);
   case X86ISD::FMIN:
   case X86ISD::FMAX:        return PerformFMinFMaxCombine(N, DAG);
+  case ISD::FMAXNUM:        return performFMaxNumCombine(N, DAG, Subtarget);
   case X86ISD::FAND:        return PerformFANDCombine(N, DAG, Subtarget);
   case X86ISD::FANDN:       return PerformFANDNCombine(N, DAG, Subtarget);
   case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
Index: test/CodeGen/X86/fmaxnum.ll
===================================================================
--- test/CodeGen/X86/fmaxnum.ll
+++ test/CodeGen/X86/fmaxnum.ll
@@ -16,7 +16,20 @@
 ; CHECK-LABEL: @test_fmaxf
-; CHECK: jmp fmaxf
+; SSE: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: andnps %xmm0, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: maxss %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vcmpunordss %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
 define float @test_fmaxf(float %x, float %y) {
   %z = call float @fmaxf(float %x, float %y) readnone
   ret float %z
@@ -37,7 +50,20 @@
 }
 ; CHECK-LABEL: @test_intrinsic_fmaxf
-; CHECK: jmp fmaxf
+; SSE: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: andnps %xmm0, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: maxss %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vcmpunordss %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
 define float @test_intrinsic_fmaxf(float %x, float %y) {
   %z = call float @llvm.maxnum.f32(float %x, float %y) readnone
   ret float %z
@@ -58,57 +84,72 @@
 }
 ; CHECK-LABEL: @test_intrinsic_fmax_v2f32
-; SSE: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE: movaps %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: movaps %xmm3, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andps %xmm2, %xmm5
+; SSE-NEXT: andnps %xmm3, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: maxss %xmm4, %xmm2
+; SSE-NEXT: movaps %xmm1, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: cmpunordss %xmm5, %xmm5
+; SSE-NEXT: movaps %xmm5, %xmm6
+; SSE-NEXT: andps %xmm3, %xmm6
+; SSE-NEXT: andnps %xmm4, %xmm5
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: maxss %xmm5, %xmm3
+; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm2
+; SSE-NEXT: andps %xmm1, %xmm2
+; SSE-NEXT: andnps %xmm0, %xmm4
+; SSE-NEXT: orps %xmm2, %xmm4
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: maxss %xmm4, %xmm2
 ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE: movaps %xmm1, %xmm0
-; SSE-NEXT: addq $72, %rsp
+; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; SSE-NEXT: movapd %xmm0, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andps %xmm1, %xmm5
+; SSE-NEXT: andnps %xmm0, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: maxss %xmm4, %xmm1
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: movaps %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: callq fmaxf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovshdup {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vmovshdup {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilps $231, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilps $231, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: addq $56, %rsp
+; AVX: vcmpunordss %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmaxss %xmm2, %xmm1, %xmm2
+; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX-NEXT: vcmpunordss %xmm4, %xmm4, %xmm5
+; AVX-NEXT: vblendvps %xmm5, %xmm3, %xmm4, %xmm4
+; AVX-NEXT: vmaxss %xmm4, %xmm3, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX-NEXT: vcmpunordss %xmm4, %xmm4, %xmm5
+; AVX-NEXT: vblendvps %xmm5, %xmm3, %xmm4, %xmm4
+; AVX-NEXT: vmaxss %xmm4, %xmm3, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
+; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; AVX-NEXT: retq
 define <2 x float> @test_intrinsic_fmax_v2f32(<2 x float> %x, <2 x float> %y) {
   %z = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
@@ -116,57 +157,72 @@
 }
 ; CHECK-LABEL: @test_intrinsic_fmax_v4f32
-; SSE: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE: movaps %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: movaps %xmm3, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andps %xmm2, %xmm5
+; SSE-NEXT: andnps %xmm3, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: maxss %xmm4, %xmm2
+; SSE-NEXT: movaps %xmm1, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: cmpunordss %xmm5, %xmm5
+; SSE-NEXT: movaps %xmm5, %xmm6
+; SSE-NEXT: andps %xmm3, %xmm6
+; SSE-NEXT: andnps %xmm4, %xmm5
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: maxss %xmm5, %xmm3
+; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm2
+; SSE-NEXT: andps %xmm1, %xmm2
+; SSE-NEXT: andnps %xmm0, %xmm4
+; SSE-NEXT: orps %xmm2, %xmm4
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: maxss %xmm4, %xmm2
 ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE: movaps %xmm1, %xmm0
-; SSE-NEXT: addq $72, %rsp
+; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; SSE-NEXT: movapd %xmm0, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andps %xmm1, %xmm5
+; SSE-NEXT: andnps %xmm0, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: maxss %xmm4, %xmm1
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: movaps %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: callq fmaxf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovshdup {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vmovshdup {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilps $231, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilps $231, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: addq $56, %rsp
+; AVX: vcmpunordss %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmaxss %xmm2, %xmm1, %xmm2
+; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX-NEXT: vcmpunordss %xmm4, %xmm4, %xmm5
+; AVX-NEXT: vblendvps %xmm5, %xmm3, %xmm4, %xmm4
+; AVX-NEXT: vmaxss %xmm4, %xmm3, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX-NEXT: vcmpunordss %xmm4, %xmm4, %xmm5
+; AVX-NEXT: vblendvps %xmm5, %xmm3, %xmm4, %xmm4
+; AVX-NEXT: vmaxss %xmm4, %xmm3, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
+; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; AVX-NEXT: retq
 define <4 x float> @test_intrinsic_fmax_v4f32(<4 x float> %x, <4 x float> %y) {
   %z = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
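
A minimal C++ model of the combine above, for illustration only (the helper names maxss_model and fmaxnum_via_maxss are hypothetical, not LLVM APIs): maxss_model assumes the MAXSS semantics quoted in the comment (max = op1 > op2 ? op1 : op2, so op2 comes back whenever either input is a NaN), and fmaxnum_via_maxss applies the same "select away a NaN first operand, then FMAX(Op1, Op0NotNan)" sequence that performFMaxNumCombine emits:

    // Illustrative sketch only: a scalar C++ model of the fmaxnum lowering above.
    #include <cmath>

    // Assumed model of SSE MAXSS: max = op1 > op2 ? op1 : op2. Any comparison
    // with a NaN is false, so op2 is returned whenever either input is a NaN.
    static float maxss_model(float op1, float op2) {
      return op1 > op2 ? op1 : op2;
    }

    // The combine: if op0 is a NaN, substitute op1 for it, then take the max.
    // This reproduces the 4-case table: op0 NaN -> op1, op1 NaN -> op0,
    // both NaN -> op1's NaN, neither NaN -> the ordinary max.
    static float fmaxnum_via_maxss(float op0, float op1) {
      float op0_not_nan = std::isnan(op0) ? op1 : op0; // SELECT(IsOp0Nan, Op1, Op0)
      return maxss_model(op1, op0_not_nan);            // X86ISD::FMAX(Op1, Op0NotNan)
    }

Under these assumptions, all four NaN cases reduce to one select plus one max, which is why the scalar SSE expansion in the CHECK lines above is cmpunordss plus andps/andnps/orps (the select) followed by maxss, and the AVX expansion is vcmpunordss plus vblendvps followed by vmaxss.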