Index: llvm/trunk/lib/Target/X86/X86InstrSSE.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td
@@ -1466,6 +1466,8 @@
                 IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM>;
 
+// FIXME: We probably want to match the rm form only when optimizing for
+// size, to avoid false dependencies (see sse_fp_unop_s for details)
 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                        SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                        string asm, OpndItins itins> {
@@ -1489,6 +1491,8 @@
   }
 }
 
+// FIXME: We probably want to match the rm form only when optimizing for
+// size, to avoid false dependencies (see sse_fp_unop_s for details)
 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                           X86MemOperand x86memop, string asm> {
 let hasSideEffects = 0, Predicates = [UseAVX] in {
@@ -1626,6 +1630,8 @@
 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
 // and/or XMM operand(s).
 
+// FIXME: We probably want to match the rm form only when optimizing for
+// size, to avoid false dependencies (see sse_fp_unop_s for details)
 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                           Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
                           string asm, OpndItins itins> {
@@ -3387,9 +3393,18 @@
     def : Pat<(Intr (load addr:$src)),
               (vt (COPY_TO_REGCLASS(!cast<Instruction>(NAME#Suffix##m)
                                     addr:$src), VR128))>;
-    def : Pat<(Intr mem_cpat:$src),
-               (!cast<Instruction>(NAME#Suffix##m_Int)
-               (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
+  }
+  // We don't want to fold scalar loads into these instructions unless
+  // optimizing for size. This is because the folded instruction will have a
+  // partial register update, while the unfolded sequence will not, e.g.
+  // movss mem, %xmm0
+  // rcpss %xmm0, %xmm0
+  // which has a clobber before the rcp, vs.
+  // rcpss mem, %xmm0
+  let Predicates = [target, OptForSize] in {
+    def : Pat<(Intr mem_cpat:$src),
+               (!cast<Instruction>(NAME#Suffix##m_Int)
+               (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
   }
 }
@@ -3420,28 +3435,37 @@
     }
   }
+  // We don't want to fold scalar loads into these instructions unless
+  // optimizing for size. This is because the folded instruction will have a
+  // partial register update, while the unfolded sequence will not, e.g.
+  // vmovss mem, %xmm0
+  // vrcpss %xmm0, %xmm0, %xmm0
+  // which has a clobber before the rcp, vs.
+  // vrcpss mem, %xmm0, %xmm0
+  // TODO: In theory, we could fold the load, and avoid the stall caused by
+  // the partial register store, either in ExeDepFix or with smarter RA.
   let Predicates = [UseAVX] in {
    def : Pat<(OpNode RC:$src),
              (!cast<Instruction>("V"#NAME#Suffix##r)
                  (ScalarVT (IMPLICIT_DEF)), RC:$src)>;
-
-   def : Pat<(vt (OpNode mem_cpat:$src)),
-             (!cast<Instruction>("V"#NAME#Suffix##m_Int) (vt (IMPLICIT_DEF)),
-                  mem_cpat:$src)>;
-
   }
   let Predicates = [HasAVX] in {
    def : Pat<(Intr VR128:$src),
              (!cast<Instruction>("V"#NAME#Suffix##r_Int) (vt (IMPLICIT_DEF)),
                                 VR128:$src)>;
-
-   def : Pat<(Intr mem_cpat:$src),
-             (!cast<Instruction>("V"#NAME#Suffix##m_Int)
+  }
+  let Predicates = [HasAVX, OptForSize] in {
+    def : Pat<(Intr mem_cpat:$src),
+              (!cast<Instruction>("V"#NAME#Suffix##m_Int)
                    (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
   }
-  let Predicates = [UseAVX, OptForSize] in
-  def : Pat<(ScalarVT (OpNode (load addr:$src))),
-            (!cast<Instruction>("V"#NAME#Suffix##m) (ScalarVT (IMPLICIT_DEF)),
-             addr:$src)>;
+  let Predicates = [UseAVX, OptForSize] in {
+    def : Pat<(ScalarVT (OpNode (load addr:$src))),
+              (!cast<Instruction>("V"#NAME#Suffix##m) (ScalarVT (IMPLICIT_DEF)),
+               addr:$src)>;
+    def : Pat<(vt (OpNode mem_cpat:$src)),
+              (!cast<Instruction>("V"#NAME#Suffix##m_Int) (vt (IMPLICIT_DEF)),
+                mem_cpat:$src)>;
+  }
 }
 
 /// sse1_fp_unop_p - SSE1 unops in packed form.
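Note on the mechanism (commentary, not part of the patch): a TableGen selection
pattern is only considered by instruction selection when every entry in its
Predicates list holds for the current function. Moving the memory-form (m_Int)
patterns from a plain [target]/[HasAVX] block into one that also lists
OptForSize is therefore what restricts load folding to functions compiled for
size. The shape of the idiom, copied from the hunk above:

  let Predicates = [HasAVX, OptForSize] in {
    def : Pat<(Intr mem_cpat:$src),
              (!cast<Instruction>("V"#NAME#Suffix##m_Int)
                   (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
  }

Outside OptForSize only the register-form patterns can match, so the load is
selected separately as a (v)movss/(v)movsd and the unary op then reads a fully
written register, avoiding the false dependency described in the comments.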
Index: llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
+++ llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
@@ -2,17 +2,19 @@
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
 
-; Verify that we're folding the load into the math instruction.
+; Verify we fold loads into unary sse intrinsics only when optimizing for size
 
 define float @rcpss(float* %a) {
 ; SSE-LABEL: rcpss:
 ; SSE:       # BB#0:
-; SSE-NEXT:    rcpss (%rdi), %xmm0
+; SSE-NEXT:    movss (%rdi), %xmm0
+; SSE-NEXT:    rcpss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rcpss:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vrcpss (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vmovss (%rdi), %xmm0
+; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %ld = load float, float* %a
   %ins = insertelement <4 x float> undef, float %ld, i32 0
@@ -24,12 +26,14 @@
 define float @rsqrtss(float* %a) {
 ; SSE-LABEL: rsqrtss:
 ; SSE:       # BB#0:
-; SSE-NEXT:    rsqrtss (%rdi), %xmm0
+; SSE-NEXT:    movss (%rdi), %xmm0
+; SSE-NEXT:    rsqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rsqrtss:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vrsqrtss (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vmovss (%rdi), %xmm0
+; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %ld = load float, float* %a
   %ins = insertelement <4 x float> undef, float %ld, i32 0
@@ -41,12 +45,14 @@
 define float @sqrtss(float* %a) {
 ; SSE-LABEL: sqrtss:
 ; SSE:       # BB#0:
-; SSE-NEXT:    sqrtss (%rdi), %xmm0
+; SSE-NEXT:    movss (%rdi), %xmm0
+; SSE-NEXT:    sqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtss:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vsqrtss (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vmovss (%rdi), %xmm0
+; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %ld = load float, float* %a
   %ins = insertelement <4 x float> undef, float %ld, i32 0
@@ -58,12 +64,14 @@
 define double @sqrtsd(double* %a) {
 ; SSE-LABEL: sqrtsd:
 ; SSE:       # BB#0:
-; SSE-NEXT:    sqrtsd (%rdi), %xmm0
+; SSE-NEXT:    movsd (%rdi), %xmm0
+; SSE-NEXT:    sqrtsd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtsd:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vsqrtsd (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vmovsd (%rdi), %xmm0
+; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %ld = load double, double* %a
   %ins = insertelement <2 x double> undef, double %ld, i32 0
@@ -72,9 +80,75 @@
   ret double %ext
 }
 
+define float @rcpss_size(float* %a) optsize {
+; SSE-LABEL: rcpss_size:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpss (%rdi), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: rcpss_size:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpss (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %ld = load float, float* %a
+  %ins = insertelement <4 x float> undef, float %ld, i32 0
+  %res = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
+  %ext = extractelement <4 x float> %res, i32 0
+  ret float %ext
+}
+
+define float @rsqrtss_size(float* %a) optsize {
+; SSE-LABEL: rsqrtss_size:
+; SSE:       # BB#0:
+; SSE-NEXT:    rsqrtss (%rdi), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: rsqrtss_size:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrsqrtss (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %ld = load float, float* %a
+  %ins = insertelement <4 x float> undef, float %ld, i32 0
+  %res = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %ins)
+  %ext = extractelement <4 x float> %res, i32 0
+  ret float %ext
+}
+
+define float @sqrtss_size(float* %a) optsize {
+; SSE-LABEL: sqrtss_size:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtss (%rdi), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: sqrtss_size:
+; AVX:       # BB#0:
+; AVX-NEXT:    vsqrtss (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %ld = load float, float* %a
+  %ins = insertelement <4 x float> undef, float %ld, i32 0
+  %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ins)
+  %ext = extractelement <4 x float> %res, i32 0
+  ret float %ext
+}
+
+define double @sqrtsd_size(double* %a) optsize {
+; SSE-LABEL: sqrtsd_size:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtsd (%rdi), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: sqrtsd_size:
+; AVX:       # BB#0:
+; AVX-NEXT:    vsqrtsd (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %ld = load double, double* %a
+  %ins = insertelement <2 x double> undef, double %ld, i32 0
+  %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ins)
+  %ext = extractelement <2 x double> %res, i32 0
+  ret double %ext
+}
+
 declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
 declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
-
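Note (commentary, not part of the patch): the behavior split is easy to see by
running the updated test the same way its RUN lines do, with the path assumed
relative to an llvm source checkout, e.g. for the AVX checks:

  llc -mtriple=x86_64-unknown-unknown -mattr=+avx \
      < test/CodeGen/X86/fold-load-unops.ll \
    | FileCheck test/CodeGen/X86/fold-load-unops.ll --check-prefix=AVX

The plain @rcpss/@rsqrtss/@sqrtss/@sqrtsd functions now get the unfolded
vmovss/vrcpss-style pair, while the *_size variants, which carry the optsize
attribute, still get the single folded form such as vrcpss (%rdi), %xmm0, %xmm0.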