Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1799,6 +1799,7 @@
   setTargetDAGCombine(ISD::FSUB);
   setTargetDAGCombine(ISD::FNEG);
   setTargetDAGCombine(ISD::FMA);
+  setTargetDAGCombine(ISD::FMAXNUM);
   setTargetDAGCombine(ISD::SUB);
   setTargetDAGCombine(ISD::LOAD);
   setTargetDAGCombine(ISD::MLOAD);
@@ -26594,6 +26595,51 @@
                       N->getOperand(0), N->getOperand(1));
 }
 
+static SDValue performFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
+                                     const X86Subtarget *Subtarget) {
+  EVT VT = N->getValueType(0);
+
+  // TODO: Check for global or instruction-level "nnan". In that case, we
+  //       should be able to lower to FMAX/FMIN alone.
+  // TODO: If an operand is already known to be a NaN or not a NaN, this
+  //       should be an optional swap and FMAX/FMIN.
+  // TODO: Allow f64, vectors, and fminnum.
+
+  if (VT != MVT::f32 || !Subtarget->hasSSE1() || Subtarget->useSoftFloat())
+    return SDValue();
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  SDLoc DL(N);
+  EVT SetCCType = DAG.getTargetLoweringInfo().getSetCCResultType(
+      DAG.getDataLayout(), *DAG.getContext(), VT);
+
+  // There are 4 possibilities involving NaN inputs, and these are the required
+  // outputs:
+  //                  Op1
+  //              Num     NaN
+  //           ----------------
+  //     Num  |  Max  |  Op0  |
+  // Op0      ----------------
+  //     NaN  |  Op1  |  NaN  |
+  //           ----------------
+  //
+  // The SSE FP max/min instructions were not designed for this case, but rather
+  // to implement:
+  //   Max = Op1 > Op0 ? Op1 : Op0
+  //
+  // So they always return Op0 if either input is a NaN. However, we can still
+  // use those instructions for fmaxnum by selecting away a NaN input.
+
+  // If either operand is NaN, the 2nd source operand (Op0) is passed through.
+  SDValue Max = DAG.getNode(X86ISD::FMAX, DL, VT, Op1, Op0);
+  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
+
+  // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
+  // are NaN, the NaN value of Op1 is the result.
+  return DAG.getNode(ISD::SELECT, DL, VT, IsOp0Nan, Op1, Max);
+}
+
 /// Do target-specific dag combines on X86ISD::FAND nodes.
 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
@@ -27453,6 +27499,7 @@
   case X86ISD::FOR: return PerformFORCombine(N, DAG, Subtarget);
   case X86ISD::FMIN:
   case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
+  case ISD::FMAXNUM: return performFMaxNumCombine(N, DAG, Subtarget);
   case X86ISD::FAND: return PerformFANDCombine(N, DAG, Subtarget);
   case X86ISD::FANDN: return PerformFANDNCombine(N, DAG, Subtarget);
   case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
Index: test/CodeGen/X86/fmaxnum.ll
===================================================================
--- test/CodeGen/X86/fmaxnum.ll
+++ test/CodeGen/X86/fmaxnum.ll
@@ -16,12 +16,27 @@
 ; CHECK-LABEL: @test_fmaxf
-; CHECK: jmp fmaxf
+; SSE: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: maxss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
 define float @test_fmaxf(float %x, float %y) {
   %z = call float @fmaxf(float %x, float %y) readnone
   ret float %z
 }
 
+; FIXME: Doubles should be inlined similarly to floats.
+
 ; CHECK-LABEL: @test_fmax
 ; CHECK: jmp fmax
 define double @test_fmax(double %x, double %y) {
@@ -37,12 +52,27 @@
 }
 
 ; CHECK-LABEL: @test_intrinsic_fmaxf
-; CHECK: jmp fmaxf
+; SSE: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: maxss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
 define float @test_intrinsic_fmaxf(float %x, float %y) {
   %z = call float @llvm.maxnum.f32(float %x, float %y) readnone
   ret float %z
 }
 
+; FIXME: Doubles should be inlined similarly to floats.
+
 ; CHECK-LABEL: @test_intrinsic_fmax
 ; CHECK: jmp fmax
 define double @test_intrinsic_fmax(double %x, double %y) {
@@ -57,122 +87,160 @@
   ret x86_fp80 %z
 }
 
+; FIXME: This should not be doing 4 scalar ops on a 2 element vector.
+; FIXME: This should use vector ops (maxps / cmpps).
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v2f32
-; SSE: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE: movaps %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: movaps %xmm3, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andps %xmm2, %xmm5
+; SSE-NEXT: maxss %xmm3, %xmm2
+; SSE-NEXT: andnps %xmm2, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSE-NEXT: movaps %xmm0, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; SSE-NEXT: movaps %xmm5, %xmm3
+; SSE-NEXT: cmpunordss %xmm3, %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm6
+; SSE-NEXT: andps %xmm2, %xmm6
+; SSE-NEXT: maxss %xmm5, %xmm2
+; SSE-NEXT: andnps %xmm2, %xmm3
+; SSE-NEXT: orps %xmm6, %xmm3
+; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm4
+; SSE-NEXT: andps %xmm1, %xmm4
+; SSE-NEXT: movaps %xmm1, %xmm5
+; SSE-NEXT: maxss %xmm0, %xmm5
+; SSE-NEXT: andnps %xmm5, %xmm2
+; SSE-NEXT: orps %xmm4, %xmm2
 ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE: movaps %xmm1, %xmm0
-; SSE-NEXT: addq $72, %rsp
+; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; SSE-NEXT: movapd %xmm0, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andps %xmm1, %xmm5
+; SSE-NEXT: maxss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: movaps %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: callq fmaxf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovshdup {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vmovshdup {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilps $231, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilps $231, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: addq $56, %rsp
+; AVX: vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
+; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm2, %xmm2
+; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX-NEXT: vmaxss %xmm3, %xmm4, %xmm5
+; AVX-NEXT: vcmpunordss %xmm3, %xmm3, %xmm3
+; AVX-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX-NEXT: vmaxss %xmm3, %xmm4, %xmm5
+; AVX-NEXT: vcmpunordss %xmm3, %xmm3, %xmm3
+; AVX-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm3
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm3, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; AVX-NEXT: retq
+;
 define <2 x float> @test_intrinsic_fmax_v2f32(<2 x float> %x, <2 x float> %y) {
   %z = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
   ret <2 x float> %z
 }
 
+; FIXME: This should use vector ops (maxps / cmpps).
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v4f32
-; SSE: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE: movaps %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: movaps %xmm3, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andps %xmm2, %xmm5
+; SSE-NEXT: maxss %xmm3, %xmm2
+; SSE-NEXT: andnps %xmm2, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSE-NEXT: movaps %xmm0, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; SSE-NEXT: movaps %xmm5, %xmm3
+; SSE-NEXT: cmpunordss %xmm3, %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm6
+; SSE-NEXT: andps %xmm2, %xmm6
+; SSE-NEXT: maxss %xmm5, %xmm2
+; SSE-NEXT: andnps %xmm2, %xmm3
+; SSE-NEXT: orps %xmm6, %xmm3
+; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm4
+; SSE-NEXT: andps %xmm1, %xmm4
+; SSE-NEXT: movaps %xmm1, %xmm5
+; SSE-NEXT: maxss %xmm0, %xmm5
+; SSE-NEXT: andnps %xmm5, %xmm2
+; SSE-NEXT: orps %xmm4, %xmm2
 ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE: movaps %xmm1, %xmm0
-; SSE-NEXT: addq $72, %rsp
+; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; SSE-NEXT: movapd %xmm0, %xmm4
+; SSE-NEXT: cmpunordss %xmm4, %xmm4
+; SSE-NEXT: movaps %xmm4, %xmm5
+; SSE-NEXT: andps %xmm1, %xmm5
+; SSE-NEXT: maxss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm4
+; SSE-NEXT: orps %xmm5, %xmm4
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: movaps %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: callq fmaxf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovshdup {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vmovshdup {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilps $231, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilps $231, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: addq $56, %rsp
+; AVX: vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
+; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm2, %xmm2
+; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX-NEXT: vmaxss %xmm3, %xmm4, %xmm5
+; AVX-NEXT: vcmpunordss %xmm3, %xmm3, %xmm3
+; AVX-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX-NEXT: vmaxss %xmm3, %xmm4, %xmm5
+; AVX-NEXT: vcmpunordss %xmm3, %xmm3, %xmm3
+; AVX-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm3
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm3, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; AVX-NEXT: retq
 define <4 x float> @test_intrinsic_fmax_v4f32(<4 x float> %x, <4 x float> %y) {
   %z = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
   ret <4 x float> %z
 }
 
+; FIXME: Vector of doubles should be inlined similarly to vector of floats.
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v2f64
 ; CHECK: callq fmax
 ; CHECK: callq fmax
@@ -181,6 +249,8 @@
   ret <2 x double> %z
 }
 
+; FIXME: Vector of doubles should be inlined similarly to vector of floats.
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v4f64
 ; CHECK: callq fmax
 ; CHECK: callq fmax
@@ -191,6 +261,8 @@
   ret <4 x double> %z
 }
 
+; FIXME: Vector of doubles should be inlined similarly to vector of floats.
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v8f64
 ; CHECK: callq fmax
 ; CHECK: callq fmax
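Not part of the patch: a minimal standalone C++ sketch (illustrative names, plain scalar code rather than LLVM APIs) of the node sequence that performFMaxNumCombine builds, showing how the select on an unordered self-compare of Op0 recovers the fmaxnum NaN table from the SSE max behavior described in the code comment above.

#include <cassert>
#include <cmath>
#include <limits>

// Models MAXSS/VMAXSS semantics as described in the patch comment:
// strictly 'a > b ? a : b', so any NaN input makes the comparison false
// and the second operand (b) is returned.
static float sseMax(float a, float b) {
  return a > b ? a : b;
}

// Models the combined DAG:
//   Max      = X86ISD::FMAX Op1, Op0
//   IsOp0Nan = setcc unordered Op0, Op0
//   result   = select IsOp0Nan, Op1, Max
static float fmaxnumLowered(float Op0, float Op1) {
  float Max = sseMax(Op1, Op0);
  bool IsOp0Nan = Op0 != Op0;  // true only if Op0 is NaN
  return IsOp0Nan ? Op1 : Max;
}

int main() {
  const float NaN = std::numeric_limits<float>::quiet_NaN();
  assert(fmaxnumLowered(1.0f, 2.0f) == 2.0f);    // Num/Num -> Max
  assert(fmaxnumLowered(NaN, 2.0f) == 2.0f);     // NaN/Num -> Op1
  assert(fmaxnumLowered(1.0f, NaN) == 1.0f);     // Num/NaN -> Op0
  assert(std::isnan(fmaxnumLowered(NaN, NaN)));  // NaN/NaN -> NaN
  return 0;
}

The four asserts correspond to the four cells of the NaN table in the new code comment; the sketch assumes a default (non-fast-math) compile so the Op0 != Op0 NaN test is preserved.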