Index: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
@@ -526,6 +526,8 @@
     { X86::PSHUFLWri,       X86::PSHUFLWmi,     TB_ALIGN_16 },
     { X86::PTESTrr,         X86::PTESTrm,       TB_ALIGN_16 },
     { X86::RCPPSr,          X86::RCPPSm,        TB_ALIGN_16 },
+    { X86::RCPSSr,          X86::RCPSSm,        0 },
+    { X86::RCPSSr_Int,      X86::RCPSSm_Int,    0 },
     { X86::ROUNDPDr,        X86::ROUNDPDm,      TB_ALIGN_16 },
     { X86::ROUNDPSr,        X86::ROUNDPSm,      TB_ALIGN_16 },
     { X86::RSQRTPSr,        X86::RSQRTPSm,      TB_ALIGN_16 },
@@ -1239,9 +1241,13 @@
     { X86::VCVTSS2SDrr,     X86::VCVTSS2SDrm,     0 },
     { X86::Int_VCVTSS2SDrr, X86::Int_VCVTSS2SDrm, 0 },
     { X86::VRCPSSr,         X86::VRCPSSm,         0 },
+    { X86::VRCPSSr_Int,     X86::VRCPSSm_Int,     0 },
     { X86::VRSQRTSSr,       X86::VRSQRTSSm,       0 },
+    { X86::VRSQRTSSr_Int,   X86::VRSQRTSSm_Int,   0 },
     { X86::VSQRTSDr,        X86::VSQRTSDm,        0 },
+    { X86::VSQRTSDr_Int,    X86::VSQRTSDm_Int,    0 },
     { X86::VSQRTSSr,        X86::VSQRTSSm,        0 },
+    { X86::VSQRTSSr_Int,    X86::VSQRTSSm_Int,    0 },
     { X86::VADDPDrr,        X86::VADDPDrm,        0 },
     { X86::VADDPSrr,        X86::VADDPSrm,        0 },
     { X86::VADDSDrr,        X86::VADDSDrm,        0 },
Index: llvm/trunk/lib/Target/X86/X86InstrSSE.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td
@@ -3369,7 +3369,7 @@
   def : Pat<(Intr (load addr:$src)),
             (vt (COPY_TO_REGCLASS(!cast<Instruction>(NAME#Suffix##m)
                addr:$src), VR128))>;
-  def : Pat<(Intr mem_cpat:$src),
+  def : Pat<(Intr mem_cpat:$src),
             (!cast<Instruction>(NAME#Suffix##m_Int)
                (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
   }
@@ -3390,16 +3390,15 @@
                 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [], itins.rm, d>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
   let isCodeGenOnly = 1 in {
-  // todo: uncomment when all r_Int forms will be added to X86InstrInfo.cpp
-  //def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
-  //              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
-  //              []>, Sched<[itins.Sched.Folded]>;
+  def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+                !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+                []>, Sched<[itins.Sched.Folded]>;
   let mayLoad = 1 in
   def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, vec_memop:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
-                []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
+                []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
   }
 }
@@ -3411,13 +3410,11 @@
             (!cast<Instruction>("V"#NAME#Suffix##m_Int) (vt (IMPLICIT_DEF)),
                 mem_cpat:$src)>;
 
-  // todo: use r_Int form when it will be ready
-  //def : Pat<(Intr VR128:$src), (!cast<Instruction>("V"#NAME#Suffix##r_Int)
-  //                              (VT (IMPLICIT_DEF)), VR128:$src)>;
   def : Pat<(Intr VR128:$src),
             (vt (COPY_TO_REGCLASS(
              !cast<Instruction>("V"#NAME#Suffix##r) (ScalarVT (IMPLICIT_DEF)),
                 (ScalarVT (COPY_TO_REGCLASS VR128:$src, RC))), VR128))>;
+
   def : Pat<(Intr mem_cpat:$src),
             (!cast<Instruction>("V"#NAME#Suffix##m_Int)
                 (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
@@ -3540,6 +3537,44 @@
 // There is no f64 version of the reciprocal approximation instructions.
 
+// TODO: We should add *scalar* op patterns for these just like we have for
+// the binops above. If the binop and unop patterns could all be unified
+// that would be even better.
+
+multiclass scalar_unary_math_patterns<Intrinsic Intr, string OpcPrefix,
+                                      SDNode Move, ValueType VT,
+                                      Predicate BasePredicate> {
+  let Predicates = [BasePredicate] in {
+    def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
+              (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
+  }
+
+  // With SSE 4.1, blendi is preferred to movs*, so match that too.
+  let Predicates = [UseSSE41] in {
+    def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
+              (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
+  }
+
+  // Repeat for AVX versions of the instructions.
+  let Predicates = [HasAVX] in {
+    def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
+              (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
+
+    def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
+              (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
+  }
+}
+
+defm : scalar_unary_math_patterns<int_x86_sse_rcp_ss, "RCPSS", X86Movss,
+                                  v4f32, UseSSE1>;
+defm : scalar_unary_math_patterns<int_x86_sse_rsqrt_ss, "RSQRTSS", X86Movss,
+                                  v4f32, UseSSE1>;
+defm : scalar_unary_math_patterns<int_x86_sse_sqrt_ss, "SQRTSS", X86Movss,
+                                  v4f32, UseSSE1>;
+defm : scalar_unary_math_patterns<int_x86_sse2_sqrt_sd, "SQRTSD", X86Movsd,
+                                  v2f64, UseSSE2>;
+
+
 //===----------------------------------------------------------------------===//
 // SSE 1 & 2 - Non-temporal stores
 //===----------------------------------------------------------------------===//
Index: llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll
+++ llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll
@@ -0,0 +1,73 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse2 < %s | FileCheck --check-prefix=SSE %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse4.1 < %s | FileCheck --check-prefix=SSE %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx < %s | FileCheck --check-prefix=AVX %s
+
+; PR21507 - https://llvm.org/bugs/show_bug.cgi?id=21507
+; Each function should be a single math op; no extra moves.
+
+
+define <4 x float> @recip(<4 x float> %x) {
+; SSE-LABEL: recip:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpss %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: recip:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %y = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %x)
+  %shuf = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x float> %shuf
+}
+
+define <4 x float> @recip_square_root(<4 x float> %x) {
+; SSE-LABEL: recip_square_root:
+; SSE:       # BB#0:
+; SSE-NEXT:    rsqrtss %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: recip_square_root:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %y = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %x)
+  %shuf = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x float> %shuf
+}
+
+define <4 x float> @square_root(<4 x float> %x) {
+; SSE-LABEL: square_root:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtss %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: square_root:
+; AVX:       # BB#0:
+; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %y = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %x)
+  %shuf = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x float> %shuf
+}
+
+define <2 x double> @square_root_double(<2 x double> %x) {
+; SSE-LABEL: square_root_double:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtsd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: square_root_double:
+; AVX:       # BB#0:
+; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %y = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %x)
+  %shuf = shufflevector <2 x double> %y, <2 x double> %x, <2 x i32> <i32 0, i32 3>
+  ret <2 x double> %shuf
+}
+
+declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>)
+declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>)
+declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>)
+declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>)