Index: lib/Target/X86/X86InstrSSE.td
===================================================================
--- lib/Target/X86/X86InstrSSE.td
+++ lib/Target/X86/X86InstrSSE.td
@@ -1307,9 +1307,7 @@
                                  (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                    "movhpd\t{$src, $dst|$dst, $src}",
-                   [(store (f64 (vector_extract
-                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
-                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
+                   [], IIC_SSE_MOV_LH>, VEX;
 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                    "movhps\t{$src, $dst|$dst, $src}",
                    [(store (f64 (vector_extract
@@ -1318,9 +1316,7 @@
                                  (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                    "movhpd\t{$src, $dst|$dst, $src}",
-                   [(store (f64 (vector_extract
-                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
-                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
+                   [], IIC_SSE_MOV_LH>;
 } // SchedRW
 
 let Predicates = [HasAVX] in {
@@ -1332,6 +1328,8 @@
                      (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
             (VMOVHPSrm VR128:$src1, addr:$src2)>;
 
+  // VMOVHPD patterns
+
   // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
   // is during lowering, where it's not possible to recognize the load fold
   // cause it has two uses through a bitcast. One use disappears at isel time
@@ -1344,6 +1342,16 @@
   def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
             (VMOVHPDrm VR128:$src1, addr:$src2)>;
+
+  def : Pat<(store (f64 (vector_extract
+                          (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
+                          (iPTR 0))), addr:$dst),
+            (VMOVHPDmr addr:$dst, VR128:$src)>;
+
+  def : Pat<(store (f64 (vector_extract
+                          (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
+                          (iPTR 0))), addr:$dst),
+            (VMOVHPDmr addr:$dst, VR128:$src)>;
 }
 
 let Predicates = [UseSSE1] in {
@@ -1369,6 +1377,16 @@
   def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
             (MOVHPDrm VR128:$src1, addr:$src2)>;
+
+  def : Pat<(store (f64 (vector_extract
+                          (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
+                          (iPTR 0))), addr:$dst),
+            (MOVHPDmr addr:$dst, VR128:$src)>;
+
+  def : Pat<(store (f64 (vector_extract
+                          (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
+                          (iPTR 0))), addr:$dst),
+            (MOVHPDmr addr:$dst, VR128:$src)>;
 }
 
 //===----------------------------------------------------------------------===//
Index: test/CodeGen/X86/extractelement-load.ll
===================================================================
--- test/CodeGen/X86/extractelement-load.ll
+++ test/CodeGen/X86/extractelement-load.ll
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | FileCheck %s
 ; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mattr=+avx -mcpu=btver2 | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
@@ -29,16 +30,15 @@
 ; This case could easily end up inf-looping in the DAG combiner due to an
 ; low alignment load of the vector which prevents us from reliably forming a
 ; narrow load.
-; FIXME: It would be nice to detect whether the target has fast and legal
-; unaligned loads and use them here.
+
+; The expected codegen is identical for the AVX case except
+; load/store instructions will have a leading 'v', so we don't
+; need to special-case the checks.
+
 define void @t3() {
 ; CHECK-LABEL: t3:
-;
-; This movs the entire vector, shuffling the high double down. If we fixed the
-; FIXME above it would just move the high double directly.
 ; CHECK: movupd
-; CHECK: shufpd
-; CHECK: movlpd
+; CHECK: movhpd
 bb:
   %tmp13 = load <2 x double>* undef, align 1