Index: llvm/trunk/lib/Target/X86/X86InstrSSE.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td
@@ -4602,17 +4602,17 @@
                       (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                       IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
 def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
-                        "mov{d|q}\t{$src, $dst|$dst, $src}",
+                        "movq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (v2i64 (scalar_to_vector GR64:$src)))],
                         IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
 def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
-                        "mov{d|q}\t{$src, $dst|$dst, $src}",
+                        "movq\t{$src, $dst|$dst, $src}",
                         [], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
 let isCodeGenOnly = 1 in
 def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
-                       "mov{d|q}\t{$src, $dst|$dst, $src}",
+                       "movq\t{$src, $dst|$dst, $src}",
                        [(set FR64:$dst, (bitconvert GR64:$src))],
                        IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
 } // ExeDomain = SSEPackedInt
@@ -4681,7 +4681,7 @@
                         VEX;
 def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
-                        "mov{d|q}\t{$src, $dst|$dst, $src}",
+                        "movq\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
                                                      (iPTR 0)))],
                         IIC_SSE_MOVD_ToGP>;
@@ -4694,7 +4694,7 @@
                 [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
 def MOVPQIto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
-                        "mov{d|q}\t{$src, $dst|$dst, $src}",
+                        "movq\t{$src, $dst|$dst, $src}",
                         [], IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
 } // ExeDomain = SSEPackedInt
@@ -4721,7 +4721,7 @@
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
                       IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
 def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
-                       "mov{d|q}\t{$src, $dst|$dst, $src}",
+                       "movq\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (bitconvert FR64:$src))],
                        IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
 def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
@@ -4811,12 +4811,12 @@
   }
 }
 
-// These are the correct encodings of the instructions so that we know how to
-// read correct assembly, even though we continue to emit the wrong ones for
-// compatibility with Darwin's buggy assembler.
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
+// Before the MC layer of LLVM existed, clang emitted "movd" assembly instead
+// of "movq" due to a MacOS parsing limitation. In order to parse old
+// assembly, we add these aliases.
+def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
+def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
 // Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}", Index: llvm/trunk/test/CodeGen/X86/GlobalISel/memop.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/GlobalISel/memop.ll +++ llvm/trunk/test/CodeGen/X86/GlobalISel/memop.ll @@ -65,7 +65,7 @@ ; SSE-LABEL: test_load_double: ; SSE: # BB#0: ; SSE-NEXT: movq (%rdi), %rax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: retq ; ; ALL_AVX-LABEL: test_load_double: @@ -160,7 +160,7 @@ ; ; SSE_FAST-LABEL: test_store_double: ; SSE_FAST: # BB#0: -; SSE_FAST-NEXT: movd %xmm0, %rax +; SSE_FAST-NEXT: movq %xmm0, %rax ; SSE_FAST-NEXT: movq %rax, (%rdi) ; SSE_FAST-NEXT: movq %rdi, %rax ; SSE_FAST-NEXT: retq Index: llvm/trunk/test/CodeGen/X86/asm-reg-type-mismatch.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/asm-reg-type-mismatch.ll +++ llvm/trunk/test/CodeGen/X86/asm-reg-type-mismatch.ll @@ -27,5 +27,5 @@ ret i64 %0 ; CHECK: test2 ; CHECK: movq {{.*}}, %xmm7 - ; CHECK: movd %xmm7, %rax + ; CHECK: movq %xmm7, %rax } Index: llvm/trunk/test/CodeGen/X86/atomic-non-integer.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/atomic-non-integer.ll +++ llvm/trunk/test/CodeGen/X86/atomic-non-integer.ll @@ -26,7 +26,7 @@ define void @store_double(double* %fptr, double %v) { ; CHECK-LABEL: @store_double -; CHECK: movd %xmm0, %rax +; CHECK: movq %xmm0, %rax ; CHECK: movq %rax, (%rdi) store atomic double %v, double* %fptr unordered, align 8 ret void @@ -59,7 +59,7 @@ define double @load_double(double* %fptr) { ; CHECK-LABEL: @load_double ; CHECK: movq (%rdi), %rax -; CHECK: movd %rax, %xmm0 +; CHECK: movq %rax, %xmm0 %v = load atomic double, double* %fptr unordered, align 8 ret double %v } @@ -85,7 +85,7 @@ define void @store_double_seq_cst(double* %fptr, double %v) { ; CHECK-LABEL: @store_double_seq_cst -; CHECK: movd %xmm0, %rax +; CHECK: movq %xmm0, %rax ; CHECK: xchgq %rax, (%rdi) store atomic double %v, double* %fptr seq_cst, align 8 ret void @@ -102,7 +102,7 @@ define double @load_double_seq_cst(double* %fptr) { ; CHECK-LABEL: @load_double_seq_cst ; CHECK: movq (%rdi), %rax -; CHECK: movd %rax, %xmm0 +; CHECK: movq %rax, %xmm0 %v = load atomic double, double* %fptr seq_cst, align 8 ret double %v } Index: llvm/trunk/test/CodeGen/X86/bitcast2.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/bitcast2.ll +++ llvm/trunk/test/CodeGen/X86/bitcast2.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=x86-64 -mattr=-avx | grep movd | count 2 +; RUN: llc < %s -march=x86-64 -mattr=-avx | grep movq | count 2 ; RUN: llc < %s -march=x86-64 -mattr=-avx | not grep rsp define i64 @test1(double %A) { Index: llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll +++ llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll @@ -928,7 +928,7 @@ ; SSE-NEXT: pushq %r14 ; SSE-NEXT: pushq %rbx ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rcx +; SSE-NEXT: movq %xmm0, %rcx ; SSE-NEXT: movq %rcx, %r8 ; SSE-NEXT: movq %rcx, %r9 ; SSE-NEXT: movq %rcx, %r10 @@ -938,7 +938,7 @@ ; SSE-NEXT: movq %rcx, %rdi ; SSE-NEXT: andb $15, %cl ; SSE-NEXT: movb %cl, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movd %xmm1, %rcx +; SSE-NEXT: movq %xmm1, %rcx ; SSE-NEXT: shrq $56, %rdi ; SSE-NEXT: andb $15, %dil ; SSE-NEXT: 
movb %dil, -{{[0-9]+}}(%rsp) @@ -1106,7 +1106,7 @@ ; SSE-NEXT: pushq %r14 ; SSE-NEXT: pushq %rbx ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rcx +; SSE-NEXT: movq %xmm0, %rcx ; SSE-NEXT: movq %rcx, %r8 ; SSE-NEXT: movq %rcx, %r9 ; SSE-NEXT: movq %rcx, %r10 @@ -1116,7 +1116,7 @@ ; SSE-NEXT: movq %rcx, %rdi ; SSE-NEXT: andb $15, %cl ; SSE-NEXT: movb %cl, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movd %xmm2, %rcx +; SSE-NEXT: movq %xmm2, %rcx ; SSE-NEXT: shrq $56, %rdi ; SSE-NEXT: andb $15, %dil ; SSE-NEXT: movb %dil, -{{[0-9]+}}(%rsp) Index: llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll +++ llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll @@ -30,7 +30,7 @@ ; X64-NEXT: shlq $32, %rcx ; X64-NEXT: movl (%rdi,%rax), %eax ; X64-NEXT: orq %rcx, %rax -; X64-NEXT: movd %rax, %xmm0 +; X64-NEXT: movq %rax, %xmm0 ; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7] ; X64-NEXT: movd %xmm0, %eax Index: llvm/trunk/test/CodeGen/X86/extractelement-index.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/extractelement-index.ll +++ llvm/trunk/test/CodeGen/X86/extractelement-index.ll @@ -320,7 +320,7 @@ define i64 @extractelement_v2i64_0(<2 x i64> %a, i256 %i) nounwind { ; SSE-LABEL: extractelement_v2i64_0: ; SSE: # BB#0: -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: retq ; ; AVX-LABEL: extractelement_v2i64_0: @@ -335,7 +335,7 @@ ; SSE2-LABEL: extractelement_v2i64_1: ; SSE2: # BB#0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: movq %xmm0, %rax ; SSE2-NEXT: retq ; ; SSE41-LABEL: extractelement_v2i64_1: @@ -355,7 +355,7 @@ ; SSE2-LABEL: extractelement_v4i64_1: ; SSE2: # BB#0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: movq %xmm0, %rax ; SSE2-NEXT: retq ; ; SSE41-LABEL: extractelement_v4i64_1: @@ -376,7 +376,7 @@ ; SSE2-LABEL: extractelement_v4i64_3: ; SSE2: # BB#0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: movq %xmm0, %rax ; SSE2-NEXT: retq ; ; SSE41-LABEL: extractelement_v4i64_3: Index: llvm/trunk/test/CodeGen/X86/gather-addresses.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/gather-addresses.ll +++ llvm/trunk/test/CodeGen/X86/gather-addresses.ll @@ -11,7 +11,7 @@ ; LIN: movdqa (%rsi), %xmm0 ; LIN: pand (%rdx), %xmm0 ; LIN: pextrq $1, %xmm0, %r[[REG4:.+]] -; LIN: movd %xmm0, %r[[REG2:.+]] +; LIN: movq %xmm0, %r[[REG2:.+]] ; LIN: movslq %e[[REG2]], %r[[REG1:.+]] ; LIN: sarq $32, %r[[REG2]] ; LIN: movslq %e[[REG4]], %r[[REG3:.+]] @@ -24,7 +24,7 @@ ; WIN: movdqa (%rdx), %xmm0 ; WIN: pand (%r8), %xmm0 ; WIN: pextrq $1, %xmm0, %r[[REG4:.+]] -; WIN: movd %xmm0, %r[[REG2:.+]] +; WIN: movq %xmm0, %r[[REG2:.+]] ; WIN: movslq %e[[REG2]], %r[[REG1:.+]] ; WIN: sarq $32, %r[[REG2]] ; WIN: movslq %e[[REG4]], %r[[REG3:.+]] Index: llvm/trunk/test/CodeGen/X86/i64-to-float.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/i64-to-float.ll +++ llvm/trunk/test/CodeGen/X86/i64-to-float.ll @@ -251,11 +251,11 @@ ; X64-SSE-NEXT: pandn %xmm3, %xmm0 ; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm1 ; X64-SSE-NEXT: por %xmm0, %xmm1 -; X64-SSE-NEXT: movd %xmm1, %rax +; X64-SSE-NEXT: movq %xmm1, %rax ; X64-SSE-NEXT: xorps %xmm0, 
%xmm0 ; X64-SSE-NEXT: cvtsi2sdq %rax, %xmm0 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; X64-SSE-NEXT: movd %xmm1, %rax +; X64-SSE-NEXT: movq %xmm1, %rax ; X64-SSE-NEXT: xorps %xmm1, %xmm1 ; X64-SSE-NEXT: cvtsi2sdq %rax, %xmm1 ; X64-SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] Index: llvm/trunk/test/CodeGen/X86/isint.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/isint.ll +++ llvm/trunk/test/CodeGen/X86/isint.ll @@ -1,8 +1,7 @@ -; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck %s -; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK -check-prefix=CHECK64 %s ; PR19059 -; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK32 %s +; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK -check-prefix=CHECK32 %s define i32 @isint_return(double %d) nounwind { ; CHECK-LABEL: isint_return: @@ -15,7 +14,8 @@ %c = fcmp oeq double %d, %e ; CHECK32-NOT: movd {{.*}}, %r{{.*}} ; CHECK32-NOT: andq -; CHECK-NEXT: movd +; CHECK32-NEXT: movd +; CHECK64-NEXT: movq ; CHECK-NEXT: andl %z = zext i1 %c to i32 ret i32 %z Index: llvm/trunk/test/CodeGen/X86/lower-bitcast.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/lower-bitcast.ll +++ llvm/trunk/test/CodeGen/X86/lower-bitcast.ll @@ -44,16 +44,16 @@ define i64 @test3(i64 %A) { ; CHECK-LABEL: test3: ; CHECK: # BB#0: -; CHECK-NEXT: movd %rdi, %xmm0 +; CHECK-NEXT: movq %rdi, %xmm0 ; CHECK-NEXT: addps {{.*}}(%rip), %xmm0 -; CHECK-NEXT: movd %xmm0, %rax +; CHECK-NEXT: movq %xmm0, %rax ; CHECK-NEXT: retq ; ; CHECK-WIDE-LABEL: test3: ; CHECK-WIDE: # BB#0: -; CHECK-WIDE-NEXT: movd %rdi, %xmm0 +; CHECK-WIDE-NEXT: movq %rdi, %xmm0 ; CHECK-WIDE-NEXT: addps {{.*}}(%rip), %xmm0 -; CHECK-WIDE-NEXT: movd %xmm0, %rax +; CHECK-WIDE-NEXT: movq %xmm0, %rax ; CHECK-WIDE-NEXT: retq %1 = bitcast i64 %A to <2 x float> %add = fadd <2 x float> %1, @@ -67,18 +67,18 @@ define i64 @test4(i64 %A) { ; CHECK-LABEL: test4: ; CHECK: # BB#0: -; CHECK-NEXT: movd %rdi, %xmm0 +; CHECK-NEXT: movq %rdi, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] ; CHECK-NEXT: paddd {{.*}}(%rip), %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-NEXT: movd %xmm0, %rax +; CHECK-NEXT: movq %xmm0, %rax ; CHECK-NEXT: retq ; ; CHECK-WIDE-LABEL: test4: ; CHECK-WIDE: # BB#0: -; CHECK-WIDE-NEXT: movd %rdi, %xmm0 +; CHECK-WIDE-NEXT: movq %rdi, %xmm0 ; CHECK-WIDE-NEXT: paddd {{.*}}(%rip), %xmm0 -; CHECK-WIDE-NEXT: movd %xmm0, %rax +; CHECK-WIDE-NEXT: movq %xmm0, %rax ; CHECK-WIDE-NEXT: retq %1 = bitcast i64 %A to <2 x i32> %add = add <2 x i32> %1, Index: llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll +++ llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll @@ -80,7 +80,7 @@ ; CHECK-NEXT: movd %esi, %xmm0 ; CHECK-NEXT: movd %edi, %xmm1 ; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; CHECK-NEXT: movd %xmm1, %rax +; CHECK-NEXT: movq %xmm1, %rax ; CHECK-NEXT: retq %v0 = insertelement <2 x i32> undef, i32 %a, i32 0 %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1 Index: llvm/trunk/test/CodeGen/X86/mmx-cvt.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mmx-cvt.ll +++ llvm/trunk/test/CodeGen/X86/mmx-cvt.ll @@ -347,7 
+347,7 @@ ; X64-NEXT: movq (%rdi), %mm0 ; X64-NEXT: paddd %mm0, %mm0 ; X64-NEXT: movd %mm0, %rax -; X64-NEXT: movd %rax, %xmm0 +; X64-NEXT: movq %rax, %xmm0 ; X64-NEXT: cvtdq2ps %xmm0, %xmm0 ; X64-NEXT: retq %2 = bitcast <1 x i64>* %0 to x86_mmx* Index: llvm/trunk/test/CodeGen/X86/mod128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mod128.ll +++ llvm/trunk/test/CodeGen/X86/mod128.ll @@ -18,7 +18,7 @@ ; WIN64-DAG: movq $0, 40(%rsp) ; WIN64-DAG: movq $3, 32(%rsp) ; WIN64: callq __modti3 - ; WIN64: movd %xmm0, %rax + ; WIN64: movq %xmm0, %rax %1 = srem i128 %x, 3 %2 = trunc i128 %1 to i64 Index: llvm/trunk/test/CodeGen/X86/movmsk.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/movmsk.ll +++ llvm/trunk/test/CodeGen/X86/movmsk.ll @@ -100,7 +100,7 @@ define void @float_call_signbit(double %n) { ; CHECK-LABEL: float_call_signbit: ; CHECK: ## BB#0: ## %entry -; CHECK-NEXT: movd %xmm0, %rdi +; CHECK-NEXT: movq %xmm0, %rdi ; CHECK-NEXT: shrq $63, %rdi ; CHECK-NEXT: ## kill: %EDI %EDI %RDI ; CHECK-NEXT: jmp _float_call_signbit_callee ## TAILCALL Index: llvm/trunk/test/CodeGen/X86/nontemporal-2.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/nontemporal-2.ll +++ llvm/trunk/test/CodeGen/X86/nontemporal-2.ll @@ -596,14 +596,14 @@ ; SSE2-LABEL: test_extract_i64: ; SSE2: # BB#0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: movq %xmm0, %rax ; SSE2-NEXT: movntiq %rax, (%rdi) ; SSE2-NEXT: retq ; ; SSE4A-LABEL: test_extract_i64: ; SSE4A: # BB#0: ; SSE4A-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE4A-NEXT: movd %xmm0, %rax +; SSE4A-NEXT: movq %xmm0, %rax ; SSE4A-NEXT: movntiq %rax, (%rdi) ; SSE4A-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/pr18344.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/pr18344.ll +++ llvm/trunk/test/CodeGen/X86/pr18344.ll @@ -36,7 +36,7 @@ ; X64: # BB#0: # %begin ; X64-NEXT: movdqu (%rdx), %xmm0 ; X64-NEXT: pslld $4, %xmm0 -; X64-NEXT: movd %xmm0, %rax +; X64-NEXT: movq %xmm0, %rax ; X64-NEXT: movslq %eax, %r8 ; X64-NEXT: sarq $32, %rax ; X64-NEXT: pextrq $1, %xmm0, %rdx Index: llvm/trunk/test/CodeGen/X86/pr21792.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/pr21792.ll +++ llvm/trunk/test/CodeGen/X86/pr21792.ll @@ -16,7 +16,7 @@ ; CHECK-NEXT: pextrq $1, %xmm0, %rdx ; CHECK-NEXT: movq %rdx, %rcx ; CHECK-NEXT: shrq $32, %rcx -; CHECK-NEXT: movd %xmm0, %rax +; CHECK-NEXT: movq %xmm0, %rax ; CHECK-NEXT: movq %rax, %r9 ; CHECK-NEXT: shrq $32, %r9 ; CHECK-NEXT: andl $2032, %eax # imm = 0x7F0 Index: llvm/trunk/test/CodeGen/X86/pr30511.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/pr30511.ll +++ llvm/trunk/test/CodeGen/X86/pr30511.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-NEXT: cvtdq2pd %xmm0, %xmm0 ; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm0 -; CHECK-NEXT: movd %xmm0, %rax +; CHECK-NEXT: movq %xmm0, %rax ; CHECK-NEXT: retq %1 = fadd <2 x double> %a, %2 = bitcast <2 x double> %1 to <2 x i64> Index: llvm/trunk/test/CodeGen/X86/pshufb-mask-comments.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/pshufb-mask-comments.ll +++ llvm/trunk/test/CodeGen/X86/pshufb-mask-comments.ll @@ -55,7 +55,7 @@ ; CHECK-LABEL: test5: ; 
CHECK: # BB#0: ; CHECK-NEXT: movl $1, %eax -; CHECK-NEXT: movd %rax, %xmm1 +; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: movdqa %xmm1, (%rax) ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [1,1] ; CHECK-NEXT: movdqa %xmm1, (%rax) Index: llvm/trunk/test/CodeGen/X86/ret-mmx.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/ret-mmx.ll +++ llvm/trunk/test/CodeGen/X86/ret-mmx.ll @@ -33,7 +33,7 @@ ; CHECK-LABEL: t3: ; CHECK: ## BB#0: ; CHECK-NEXT: movl $1, %eax -; CHECK-NEXT: movd %rax, %xmm0 +; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: retq ret <2 x i32> } Index: llvm/trunk/test/CodeGen/X86/sad_variations.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sad_variations.ll +++ llvm/trunk/test/CodeGen/X86/sad_variations.ll @@ -206,7 +206,7 @@ ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero ; SSE2-NEXT: psadbw %xmm0, %xmm1 -; SSE2-NEXT: movd %xmm1, %rax +; SSE2-NEXT: movq %xmm1, %rax ; SSE2-NEXT: retq ; ; AVX2-LABEL: sad8_64bit_icmp_sext_slt: @@ -255,7 +255,7 @@ ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero ; SSE2-NEXT: psadbw %xmm0, %xmm1 -; SSE2-NEXT: movd %xmm1, %rax +; SSE2-NEXT: movq %xmm1, %rax ; SSE2-NEXT: retq ; ; AVX2-LABEL: sad8_64bit_icmp_zext_slt: @@ -304,7 +304,7 @@ ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero ; SSE2-NEXT: psadbw %xmm0, %xmm1 -; SSE2-NEXT: movd %xmm1, %rax +; SSE2-NEXT: movq %xmm1, %rax ; SSE2-NEXT: retq ; ; AVX2-LABEL: sad8_early_64bit_icmp_zext_slt: Index: llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll +++ llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll @@ -536,7 +536,7 @@ ; ; SSE2_64-LABEL: u64_to_d: ; SSE2_64: # BB#0: -; SSE2_64-NEXT: movd %rdi, %xmm1 +; SSE2_64-NEXT: movq %rdi, %xmm1 ; SSE2_64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; SSE2_64-NEXT: subpd {{.*}}(%rip), %xmm1 ; SSE2_64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] Index: llvm/trunk/test/CodeGen/X86/setcc-wide-types.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/setcc-wide-types.ll +++ llvm/trunk/test/CodeGen/X86/setcc-wide-types.ll @@ -58,17 +58,17 @@ ; SSE2-LABEL: ne_i256: ; SSE2: # BB#0: ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm4, %r8 +; SSE2-NEXT: movq %xmm4, %r8 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] -; SSE2-NEXT: movd %xmm4, %r9 -; SSE2-NEXT: movd %xmm0, %r10 -; SSE2-NEXT: movd %xmm1, %rsi +; SSE2-NEXT: movq %xmm4, %r9 +; SSE2-NEXT: movq %xmm0, %r10 +; SSE2-NEXT: movq %xmm1, %rsi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rdi +; SSE2-NEXT: movq %xmm0, %rdi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rax -; SSE2-NEXT: movd %xmm2, %rcx -; SSE2-NEXT: movd %xmm3, %rdx +; SSE2-NEXT: movq %xmm0, %rax +; SSE2-NEXT: movq %xmm2, %rcx +; SSE2-NEXT: movq %xmm3, %rdx ; SSE2-NEXT: xorq %rsi, %rdx ; SSE2-NEXT: xorq %r10, %rcx ; SSE2-NEXT: orq %rdx, %rcx @@ -100,17 +100,17 @@ ; SSE2-LABEL: eq_i256: ; SSE2: # BB#0: ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm4, %r8 +; SSE2-NEXT: movq %xmm4, %r8 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] -; SSE2-NEXT: movd %xmm4, %r9 -; SSE2-NEXT: movd %xmm0, %r10 -; SSE2-NEXT: movd %xmm1, %rsi +; SSE2-NEXT: 
movq %xmm4, %r9 +; SSE2-NEXT: movq %xmm0, %r10 +; SSE2-NEXT: movq %xmm1, %rsi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rdi +; SSE2-NEXT: movq %xmm0, %rdi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rax -; SSE2-NEXT: movd %xmm2, %rcx -; SSE2-NEXT: movd %xmm3, %rdx +; SSE2-NEXT: movq %xmm0, %rax +; SSE2-NEXT: movq %xmm2, %rcx +; SSE2-NEXT: movq %xmm3, %rdx ; SSE2-NEXT: xorq %rsi, %rdx ; SSE2-NEXT: xorq %r10, %rcx ; SSE2-NEXT: orq %rdx, %rcx Index: llvm/trunk/test/CodeGen/X86/shrink_vmul.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/shrink_vmul.ll +++ llvm/trunk/test/CodeGen/X86/shrink_vmul.ll @@ -801,7 +801,7 @@ ; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] ; CHECK-NEXT: movl $65536, %ecx # imm = 0x10000 -; CHECK-NEXT: movd %rcx, %xmm1 +; CHECK-NEXT: movq %rcx, %xmm1 ; CHECK-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7] ; CHECK-NEXT: movdqa %xmm0, %xmm2 ; CHECK-NEXT: pmuludq %xmm1, %xmm2 @@ -839,7 +839,7 @@ ; CHECK-NEXT: psrad $16, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] ; CHECK-NEXT: movl $32768, %ecx # imm = 0x8000 -; CHECK-NEXT: movd %rcx, %xmm1 +; CHECK-NEXT: movq %rcx, %xmm1 ; CHECK-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7] ; CHECK-NEXT: movdqa %xmm0, %xmm2 ; CHECK-NEXT: pmuludq %xmm1, %xmm2 Index: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll +++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll @@ -16,7 +16,7 @@ define i64 @test_mm_cvtsi128_si64(<2 x i64> %a0) nounwind { ; X64-LABEL: test_mm_cvtsi128_si64: ; X64: # BB#0: -; X64-NEXT: movd %xmm0, %rax +; X64-NEXT: movq %xmm0, %rax ; X64-NEXT: retq %res = extractelement <2 x i64> %a0, i32 0 ret i64 %res @@ -35,7 +35,7 @@ define <2 x i64> @test_mm_cvtsi64_si128(i64 %a0) nounwind { ; X64-LABEL: test_mm_cvtsi64_si128: ; X64: # BB#0: -; X64-NEXT: movd %rdi, %xmm0 +; X64-NEXT: movq %rdi, %xmm0 ; X64-NEXT: retq %res0 = insertelement <2 x i64> undef, i64 %a0, i32 0 %res1 = insertelement <2 x i64> %res0, i64 0, i32 1 Index: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll +++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll @@ -2291,8 +2291,8 @@ ; ; X64-LABEL: test_mm_set_epi64x: ; X64: # BB#0: -; X64-NEXT: movd %rdi, %xmm1 -; X64-NEXT: movd %rsi, %xmm0 +; X64-NEXT: movq %rdi, %xmm1 +; X64-NEXT: movq %rsi, %xmm0 ; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; X64-NEXT: retq %res0 = insertelement <2 x i64> undef, i64 %a1, i32 0 @@ -2433,7 +2433,7 @@ ; ; X64-LABEL: test_mm_set1_epi64x: ; X64: # BB#0: -; X64-NEXT: movd %rdi, %xmm0 +; X64-NEXT: movq %rdi, %xmm0 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; X64-NEXT: retq %res0 = insertelement <2 x i64> undef, i64 %a0, i32 0 @@ -2685,8 +2685,8 @@ ; ; X64-LABEL: test_mm_setr_epi64x: ; X64: # BB#0: -; X64-NEXT: movd %rsi, %xmm1 -; X64-NEXT: movd %rdi, %xmm0 +; X64-NEXT: movq %rsi, %xmm1 +; X64-NEXT: movq %rdi, %xmm0 ; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; X64-NEXT: retq %res0 = insertelement <2 x i64> undef, i64 %a0, i32 0 
@@ -3249,7 +3249,7 @@ ; ; X64-LABEL: test_mm_storel_epi64: ; X64: # BB#0: -; X64-NEXT: movd %xmm0, %rax +; X64-NEXT: movq %xmm0, %rax ; X64-NEXT: movq %rax, (%rdi) ; X64-NEXT: retq %ext = extractelement <2 x i64> %a1, i32 0 Index: llvm/trunk/test/CodeGen/X86/sse2-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse2-schedule.ll +++ llvm/trunk/test/CodeGen/X86/sse2-schedule.ll @@ -1808,32 +1808,32 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) { ; GENERIC-LABEL: test_movd_64: ; GENERIC: # BB#0: -; GENERIC-NEXT: movd %rdi, %xmm1 +; GENERIC-NEXT: movq %rdi, %xmm1 ; GENERIC-NEXT: movq {{.*#+}} xmm2 = mem[0],zero ; GENERIC-NEXT: paddq %xmm0, %xmm1 ; GENERIC-NEXT: paddq %xmm0, %xmm2 -; GENERIC-NEXT: movd %xmm2, %rax +; GENERIC-NEXT: movq %xmm2, %rax ; GENERIC-NEXT: movq %xmm1, (%rsi) ; GENERIC-NEXT: retq ; ; ATOM-LABEL: test_movd_64: ; ATOM: # BB#0: ; ATOM-NEXT: movq {{.*#+}} xmm1 = mem[0],zero -; ATOM-NEXT: movd %rdi, %xmm2 +; ATOM-NEXT: movq %rdi, %xmm2 ; ATOM-NEXT: paddq %xmm0, %xmm2 ; ATOM-NEXT: paddq %xmm0, %xmm1 ; ATOM-NEXT: movq %xmm2, (%rsi) -; ATOM-NEXT: movd %xmm1, %rax +; ATOM-NEXT: movq %xmm1, %rax ; ATOM-NEXT: retq ; ; SLM-LABEL: test_movd_64: ; SLM: # BB#0: ; SLM-NEXT: movq {{.*#+}} xmm2 = mem[0],zero sched: [3:1.00] -; SLM-NEXT: movd %rdi, %xmm1 # sched: [1:0.50] +; SLM-NEXT: movq %rdi, %xmm1 # sched: [1:0.50] ; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50] ; SLM-NEXT: movq %xmm1, (%rsi) # sched: [1:1.00] ; SLM-NEXT: paddq %xmm0, %xmm2 # sched: [1:0.50] -; SLM-NEXT: movd %xmm2, %rax # sched: [1:0.50] +; SLM-NEXT: movq %xmm2, %rax # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_movd_64: Index: llvm/trunk/test/CodeGen/X86/statepoint-vector.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/statepoint-vector.ll +++ llvm/trunk/test/CodeGen/X86/statepoint-vector.ll @@ -22,7 +22,7 @@ entry: ; CHECK-LABEL: @test2 ; CHECK: subq $40, %rsp -; CHECK: movd %rdi, %xmm1 +; CHECK: movq %rdi, %xmm1 ; CHECK: pshufd $68, %xmm1, %xmm1 # xmm1 = xmm1[0,1,0,1] ; CHECK: paddq %xmm0, %xmm1 ; CHECK: movdqa %xmm0, 16(%rsp) Index: llvm/trunk/test/CodeGen/X86/vec_fneg.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_fneg.ll +++ llvm/trunk/test/CodeGen/X86/vec_fneg.ll @@ -10,7 +10,7 @@ define <4 x float> @t1(<4 x float> %Q) nounwind { ; X32-SSE-LABEL: t1: ; X32-SSE: # BB#0: -; X32-SSE-NEXT: xorps .LCPI0_0, %xmm0 +; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: retl ; ; X64-SSE-LABEL: t1: @@ -92,7 +92,7 @@ ; X64-SSE2: # BB#0: ; X64-SSE2-NEXT: movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000 ; X64-SSE2-NEXT: xorq %rdi, %rax -; X64-SSE2-NEXT: movd %rax, %xmm0 +; X64-SSE2-NEXT: movq %rax, %xmm0 ; X64-SSE2-NEXT: retq %bitcast = bitcast i64 %i to <2 x float> %fneg = fsub <2 x float> , %bitcast Index: llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll +++ llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll @@ -20,10 +20,10 @@ ; SSE-LABEL: fptosi_2f64_to_2i64: ; SSE: # BB#0: ; SSE-NEXT: cvttsd2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: cvttsd2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: movdqa 
%xmm1, %xmm0 ; SSE-NEXT: retq @@ -128,16 +128,16 @@ ; SSE-LABEL: fptosi_4f64_to_4i64: ; SSE: # BB#0: ; SSE-NEXT: cvttsd2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm2 +; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: cvttsd2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] ; SSE-NEXT: cvttsd2si %xmm1, %rax -; SSE-NEXT: movd %rax, %xmm3 +; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] ; SSE-NEXT: cvttsd2si %xmm1, %rax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] ; SSE-NEXT: movdqa %xmm2, %xmm0 ; SSE-NEXT: movdqa %xmm3, %xmm1 @@ -263,7 +263,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rdx ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rdx -; SSE-NEXT: movd %rdx, %xmm1 +; SSE-NEXT: movq %rdx, %xmm1 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: subsd %xmm2, %xmm3 @@ -272,7 +272,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rcx ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rcx -; SSE-NEXT: movd %rcx, %xmm0 +; SSE-NEXT: movq %rcx, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq @@ -347,7 +347,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rdx ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rdx -; SSE-NEXT: movd %rdx, %xmm1 +; SSE-NEXT: movq %rdx, %xmm1 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: subsd %xmm2, %xmm3 @@ -356,7 +356,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rcx ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rcx -; SSE-NEXT: movd %rcx, %xmm0 +; SSE-NEXT: movq %rcx, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: pxor %xmm0, %xmm0 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3] @@ -428,7 +428,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rdx ; SSE-NEXT: ucomisd %xmm1, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rdx -; SSE-NEXT: movd %rdx, %xmm2 +; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: subsd %xmm1, %xmm3 @@ -437,7 +437,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rcx ; SSE-NEXT: ucomisd %xmm1, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rcx -; SSE-NEXT: movd %rcx, %xmm0 +; SSE-NEXT: movq %rcx, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3] ; SSE-NEXT: retq @@ -507,7 +507,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rdx ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rdx -; SSE-NEXT: movd %rdx, %xmm1 +; SSE-NEXT: movq %rdx, %xmm1 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: subsd %xmm2, %xmm3 @@ -516,13 +516,13 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rdx ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rdx -; SSE-NEXT: movd %rdx, %xmm0 +; SSE-NEXT: movq %rdx, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: cvttsd2si %xmm0, %rax ; SSE-NEXT: xorq %rax, %rcx ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovbq %rax, %rcx -; SSE-NEXT: movd %rcx, %xmm0 +; SSE-NEXT: movq %rcx, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2] ; SSE-NEXT: movaps %xmm1, %xmm0 @@ -586,7 +586,7 @@ ; SSE-NEXT: cvttsd2si %xmm2, %rdx ; SSE-NEXT: ucomisd %xmm3, %xmm2 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm0 +; SSE-NEXT: movq %rdx, %xmm0 ; SSE-NEXT: movhlps {{.*#+}} 
xmm2 = xmm2[1,1] ; SSE-NEXT: movaps %xmm2, %xmm4 ; SSE-NEXT: subsd %xmm3, %xmm4 @@ -595,7 +595,7 @@ ; SSE-NEXT: cvttsd2si %xmm2, %rdx ; SSE-NEXT: ucomisd %xmm3, %xmm2 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm2 +; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] ; SSE-NEXT: movapd %xmm1, %xmm2 ; SSE-NEXT: subsd %xmm3, %xmm2 @@ -604,7 +604,7 @@ ; SSE-NEXT: cvttsd2si %xmm1, %rdx ; SSE-NEXT: ucomisd %xmm3, %xmm1 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm2 +; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] ; SSE-NEXT: movaps %xmm1, %xmm4 ; SSE-NEXT: subsd %xmm3, %xmm4 @@ -613,7 +613,7 @@ ; SSE-NEXT: cvttsd2si %xmm1, %rax ; SSE-NEXT: ucomisd %xmm3, %xmm1 ; SSE-NEXT: cmovaeq %rcx, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movdqa %xmm2, %xmm1 ; SSE-NEXT: retq @@ -761,7 +761,7 @@ ; SSE-NEXT: cvttsd2si %xmm1, %rdx ; SSE-NEXT: ucomisd %xmm2, %xmm1 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm3 +; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] ; SSE-NEXT: movaps %xmm1, %xmm4 ; SSE-NEXT: subsd %xmm2, %xmm4 @@ -770,7 +770,7 @@ ; SSE-NEXT: cvttsd2si %xmm1, %rdx ; SSE-NEXT: ucomisd %xmm2, %xmm1 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm1 +; SSE-NEXT: movq %rdx, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0] ; SSE-NEXT: movapd %xmm0, %xmm1 ; SSE-NEXT: subsd %xmm2, %xmm1 @@ -779,7 +779,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rdx ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm1 +; SSE-NEXT: movq %rdx, %xmm1 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movaps %xmm0, %xmm4 ; SSE-NEXT: subsd %xmm2, %xmm4 @@ -788,7 +788,7 @@ ; SSE-NEXT: cvttsd2si %xmm0, %rax ; SSE-NEXT: ucomisd %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rcx, %rax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2] ; SSE-NEXT: movaps %xmm1, %xmm0 @@ -879,10 +879,10 @@ ; SSE-LABEL: fptosi_2f32_to_2i64: ; SSE: # BB#0: ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq @@ -940,10 +940,10 @@ ; SSE-LABEL: fptosi_4f32_to_2i64: ; SSE: # BB#0: ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq @@ -1016,19 +1016,19 @@ ; SSE-LABEL: fptosi_4f32_to_4i64: ; SSE: # BB#0: ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm2 +; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax -; SSE-NEXT: movd %rax, %xmm3 +; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movhlps 
{{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] ; SSE-NEXT: movdqa %xmm2, %xmm0 ; SSE-NEXT: retq @@ -1124,19 +1124,19 @@ ; SSE-LABEL: fptosi_8f32_to_4i64: ; SSE: # BB#0: ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm2 +; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax -; SSE-NEXT: movd %rax, %xmm3 +; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] ; SSE-NEXT: movdqa %xmm2, %xmm0 ; SSE-NEXT: retq @@ -1245,7 +1245,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rdx ; SSE-NEXT: ucomiss %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rdx -; SSE-NEXT: movd %rdx, %xmm1 +; SSE-NEXT: movq %rdx, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: subss %xmm2, %xmm3 @@ -1254,7 +1254,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rcx ; SSE-NEXT: ucomiss %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rcx -; SSE-NEXT: movd %rcx, %xmm0 +; SSE-NEXT: movq %rcx, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq @@ -1390,7 +1390,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rdx ; SSE-NEXT: ucomiss %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rdx -; SSE-NEXT: movd %rdx, %xmm1 +; SSE-NEXT: movq %rdx, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: subss %xmm2, %xmm3 @@ -1399,7 +1399,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rcx ; SSE-NEXT: ucomiss %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rcx -; SSE-NEXT: movd %rcx, %xmm0 +; SSE-NEXT: movq %rcx, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq @@ -1477,7 +1477,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rdx ; SSE-NEXT: ucomiss %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rdx -; SSE-NEXT: movd %rdx, %xmm1 +; SSE-NEXT: movq %rdx, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: subss %xmm2, %xmm3 @@ -1486,7 +1486,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rcx ; SSE-NEXT: ucomiss %xmm2, %xmm0 ; SSE-NEXT: cmovaeq %rax, %rcx -; SSE-NEXT: movd %rcx, %xmm0 +; SSE-NEXT: movq %rcx, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq @@ -1685,7 +1685,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rdx ; SSE-NEXT: ucomiss %xmm1, %xmm0 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm2 +; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 @@ -1695,7 +1695,7 @@ ; SSE-NEXT: cvttss2si %xmm3, %rdx ; SSE-NEXT: ucomiss %xmm1, %xmm3 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm3 +; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] @@ -1706,7 +1706,7 @@ ; SSE-NEXT: cvttss2si %xmm3, %rdx ; SSE-NEXT: ucomiss %xmm1, %xmm3 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm3 +; SSE-NEXT: movq %rdx, 
%xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movaps %xmm0, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 @@ -1715,7 +1715,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: ucomiss %xmm1, %xmm0 ; SSE-NEXT: cmovaeq %rcx, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] ; SSE-NEXT: movdqa %xmm2, %xmm0 ; SSE-NEXT: retq @@ -1863,7 +1863,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rdx ; SSE-NEXT: ucomiss %xmm1, %xmm0 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm2 +; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 @@ -1873,7 +1873,7 @@ ; SSE-NEXT: cvttss2si %xmm3, %rdx ; SSE-NEXT: ucomiss %xmm1, %xmm3 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm3 +; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] @@ -1884,7 +1884,7 @@ ; SSE-NEXT: cvttss2si %xmm3, %rdx ; SSE-NEXT: ucomiss %xmm1, %xmm3 ; SSE-NEXT: cmovaeq %rcx, %rdx -; SSE-NEXT: movd %rdx, %xmm3 +; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movaps %xmm0, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 @@ -1893,7 +1893,7 @@ ; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: ucomiss %xmm1, %xmm0 ; SSE-NEXT: cmovaeq %rcx, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] ; SSE-NEXT: movdqa %xmm2, %xmm0 ; SSE-NEXT: retq @@ -2257,9 +2257,9 @@ ; SSE-NEXT: movzwl %ax, %edi ; SSE-NEXT: callq __gnu_h2f_ieee ; SSE-NEXT: cvttss2si %xmm0, %rax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: cvttss2si (%rsp), %rax # 4-byte Folded Reload -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: pxor %xmm1, %xmm1 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3] @@ -2407,12 +2407,12 @@ ; SSE-NEXT: movq %rdx, %rdi ; SSE-NEXT: movq %rcx, %rsi ; SSE-NEXT: callq __fixtfdi -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; SSE-NEXT: movq %rbx, %rdi ; SSE-NEXT: movq %r14, %rsi ; SSE-NEXT: callq __fixtfdi -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload ; SSE-NEXT: # xmm0 = xmm0[0],mem[0] ; SSE-NEXT: xorps %xmm1, %xmm1 Index: llvm/trunk/test/CodeGen/X86/vec_insert-3.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_insert-3.ll +++ llvm/trunk/test/CodeGen/X86/vec_insert-3.ll @@ -15,7 +15,7 @@ ; ; X64-LABEL: t1: ; X64: # BB#0: -; X64-NEXT: movd %rdi, %xmm1 +; X64-NEXT: movq %rdi, %xmm1 ; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; X64-NEXT: retq %tmp1 = insertelement <2 x i64> %tmp, i64 %s, i32 1 Index: llvm/trunk/test/CodeGen/X86/vec_insert-5.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_insert-5.ll +++ llvm/trunk/test/CodeGen/X86/vec_insert-5.ll @@ -19,7 +19,7 @@ ; X64: # BB#0: ; X64-NEXT: # kill: %EDI %EDI %RDI ; X64-NEXT: shll $12, %edi -; X64-NEXT: movd %rdi, %xmm0 +; X64-NEXT: movq %rdi, %xmm0 ; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X64-NEXT: movq %xmm0, (%rsi) Index: 
llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll +++ llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll @@ -17,7 +17,7 @@ ; X64-LABEL: t0: ; X64: ## BB#0: ; X64-NEXT: ## kill: %EDI %EDI %RDI -; X64-NEXT: movd %rdi, %xmm0 +; X64-NEXT: movq %rdi, %xmm0 ; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7] ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X64-NEXT: retq Index: llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll +++ llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll @@ -19,10 +19,10 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) { ; SSE-LABEL: sitofp_2i64_to_2f64: ; SSE: # BB#0: -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: cvtsi2sdq %rax, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: cvtsi2sdq %rax, %xmm0 ; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] @@ -217,17 +217,17 @@ define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) { ; SSE-LABEL: sitofp_4i64_to_4f64: ; SSE: # BB#0: -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: cvtsi2sdq %rax, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: cvtsi2sdq %rax, %xmm0 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm0[0] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: cvtsi2sdq %rax, %xmm3 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: cvtsi2sdq %rax, %xmm0 ; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm0[0] @@ -1047,10 +1047,10 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) { ; SSE-LABEL: sitofp_2i64_to_4f32: ; SSE: # BB#0: -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: cvtsi2ssq %rax, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: cvtsi2ssq %rax, %xmm0 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] @@ -1111,10 +1111,10 @@ ; SSE-LABEL: sitofp_2i64_to_4f32_zero: ; SSE: # BB#0: ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: cvtsi2ssq %rax, %xmm1 -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: cvtsi2ssq %rax, %xmm0 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] @@ -1170,11 +1170,11 @@ ; SSE-LABEL: sitofp_4i64_to_4f32_undef: ; SSE: # BB#0: ; SSE-NEXT: cvtsi2ssq %rax, %xmm2 -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: cvtsi2ssq %rax, %xmm1 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: cvtsi2ssq %rax, %xmm0 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] @@ -1367,17 +1367,17 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) { ; SSE-LABEL: sitofp_4i64_to_4f32: ; SSE: # BB#0: -; SSE-NEXT: movd %xmm1, %rax +; 
SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: cvtsi2ssq %rax, %xmm3 -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: cvtsi2ssq %rax, %xmm2 ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: cvtsi2ssq %rax, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: cvtsi2ssq %rax, %xmm0 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] @@ -1610,7 +1610,7 @@ ; SSE-LABEL: uitofp_2i64_to_4f32: ; SSE: # BB#0: ; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB39_1 ; SSE-NEXT: # BB#2: @@ -1627,7 +1627,7 @@ ; SSE-NEXT: addss %xmm0, %xmm0 ; SSE-NEXT: .LBB39_3: ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB39_4 ; SSE-NEXT: # BB#5: @@ -1729,7 +1729,7 @@ ; SSE-LABEL: uitofp_2i64_to_2f32: ; SSE: # BB#0: ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB40_1 ; SSE-NEXT: # BB#2: @@ -1745,7 +1745,7 @@ ; SSE-NEXT: cvtsi2ssq %rax, %xmm1 ; SSE-NEXT: addss %xmm1, %xmm1 ; SSE-NEXT: .LBB40_3: -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB40_4 ; SSE-NEXT: # BB#5: @@ -1845,7 +1845,7 @@ ; SSE-NEXT: xorps %xmm2, %xmm2 ; SSE-NEXT: cvtsi2ssq %rax, %xmm2 ; SSE-NEXT: .LBB41_2: -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB41_3 ; SSE-NEXT: # BB#4: @@ -1863,7 +1863,7 @@ ; SSE-NEXT: .LBB41_5: ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB41_6 ; SSE-NEXT: # BB#7: @@ -2145,7 +2145,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) { ; SSE-LABEL: uitofp_4i64_to_4f32: ; SSE: # BB#0: -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB47_1 ; SSE-NEXT: # BB#2: @@ -2159,7 +2159,7 @@ ; SSE-NEXT: cvtsi2ssq %rax, %xmm3 ; SSE-NEXT: addss %xmm3, %xmm3 ; SSE-NEXT: .LBB47_3: -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB47_4 ; SSE-NEXT: # BB#5: @@ -2174,7 +2174,7 @@ ; SSE-NEXT: addss %xmm2, %xmm2 ; SSE-NEXT: .LBB47_6: ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB47_7 ; SSE-NEXT: # BB#8: @@ -2192,7 +2192,7 @@ ; SSE-NEXT: .LBB47_9: ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB47_10 ; SSE-NEXT: # BB#11: @@ -2591,10 +2591,10 @@ ; SSE-LABEL: sitofp_load_2i64_to_2f64: ; SSE: # BB#0: ; SSE-NEXT: movdqa (%rdi), %xmm1 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: cvtsi2sdq %rax, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: xorps 
%xmm1, %xmm1 ; SSE-NEXT: cvtsi2sdq %rax, %xmm1 ; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] @@ -2733,18 +2733,18 @@ ; SSE: # BB#0: ; SSE-NEXT: movdqa (%rdi), %xmm1 ; SSE-NEXT: movdqa 16(%rdi), %xmm2 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: cvtsi2sdq %rax, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: cvtsi2sdq %rax, %xmm1 ; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: cvtsi2sdq %rax, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: xorps %xmm2, %xmm2 ; SSE-NEXT: cvtsi2sdq %rax, %xmm2 ; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] @@ -3382,17 +3382,17 @@ ; SSE: # BB#0: ; SSE-NEXT: movdqa (%rdi), %xmm1 ; SSE-NEXT: movdqa 16(%rdi), %xmm2 -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: cvtsi2ssq %rax, %xmm3 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: cvtsi2ssq %rax, %xmm0 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: xorps %xmm2, %xmm2 ; SSE-NEXT: cvtsi2ssq %rax, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: cvtsi2ssq %rax, %xmm1 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] @@ -3549,34 +3549,34 @@ ; SSE-NEXT: movdqa 16(%rdi), %xmm2 ; SSE-NEXT: movdqa 32(%rdi), %xmm3 ; SSE-NEXT: movdqa 48(%rdi), %xmm4 -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: cvtsi2ssq %rax, %xmm5 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: cvtsi2ssq %rax, %xmm0 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: xorps %xmm2, %xmm2 ; SSE-NEXT: cvtsi2ssq %rax, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: cvtsi2ssq %rax, %xmm1 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE-NEXT: movd %xmm4, %rax +; SSE-NEXT: movq %xmm4, %rax ; SSE-NEXT: xorps %xmm2, %xmm2 ; SSE-NEXT: cvtsi2ssq %rax, %xmm2 -; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movq %xmm3, %rax ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: cvtsi2ssq %rax, %xmm1 ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,0,1] -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: xorps %xmm2, %xmm2 ; SSE-NEXT: cvtsi2ssq %rax, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] -; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movq %xmm3, %rax ; SSE-NEXT: xorps %xmm3, %xmm3 ; SSE-NEXT: cvtsi2ssq %rax, %xmm3 ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] @@ -3824,7 +3824,7 @@ ; SSE: # BB#0: ; SSE-NEXT: movdqa (%rdi), %xmm1 ; SSE-NEXT: movdqa 16(%rdi), %xmm3 -; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movq %xmm3, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB76_1 ; SSE-NEXT: # BB#2: @@ -3838,7 +3838,7 @@ ; SSE-NEXT: cvtsi2ssq 
%rax, %xmm2 ; SSE-NEXT: addss %xmm2, %xmm2 ; SSE-NEXT: .LBB76_3: -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB76_4 ; SSE-NEXT: # BB#5: @@ -3853,7 +3853,7 @@ ; SSE-NEXT: addss %xmm0, %xmm0 ; SSE-NEXT: .LBB76_6: ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] -; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movq %xmm3, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB76_7 ; SSE-NEXT: # BB#8: @@ -3871,7 +3871,7 @@ ; SSE-NEXT: .LBB76_9: ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB76_10 ; SSE-NEXT: # BB#11: @@ -4190,7 +4190,7 @@ ; SSE-NEXT: movdqa 16(%rdi), %xmm5 ; SSE-NEXT: movdqa 32(%rdi), %xmm2 ; SSE-NEXT: movdqa 48(%rdi), %xmm3 -; SSE-NEXT: movd %xmm5, %rax +; SSE-NEXT: movq %xmm5, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB80_1 ; SSE-NEXT: # BB#2: @@ -4204,7 +4204,7 @@ ; SSE-NEXT: cvtsi2ssq %rax, %xmm4 ; SSE-NEXT: addss %xmm4, %xmm4 ; SSE-NEXT: .LBB80_3: -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB80_4 ; SSE-NEXT: # BB#5: @@ -4219,7 +4219,7 @@ ; SSE-NEXT: addss %xmm0, %xmm0 ; SSE-NEXT: .LBB80_6: ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1] -; SSE-NEXT: movd %xmm5, %rax +; SSE-NEXT: movq %xmm5, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB80_7 ; SSE-NEXT: # BB#8: @@ -4234,7 +4234,7 @@ ; SSE-NEXT: addss %xmm6, %xmm6 ; SSE-NEXT: .LBB80_9: ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB80_10 ; SSE-NEXT: # BB#11: @@ -4250,7 +4250,7 @@ ; SSE-NEXT: cvtsi2ssq %rax, %xmm5 ; SSE-NEXT: addss %xmm5, %xmm5 ; SSE-NEXT: .LBB80_12: -; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movq %xmm3, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB80_13 ; SSE-NEXT: # BB#14: @@ -4264,7 +4264,7 @@ ; SSE-NEXT: cvtsi2ssq %rax, %xmm7 ; SSE-NEXT: addss %xmm7, %xmm7 ; SSE-NEXT: .LBB80_15: -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB80_16 ; SSE-NEXT: # BB#17: @@ -4283,7 +4283,7 @@ ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] ; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] -; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movq %xmm3, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB80_19 ; SSE-NEXT: # BB#20: @@ -4302,7 +4302,7 @@ ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] ; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movq %xmm2, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB80_22 ; SSE-NEXT: # BB#23: Index: llvm/trunk/test/CodeGen/X86/vec_set-8.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_set-8.ll +++ llvm/trunk/test/CodeGen/X86/vec_set-8.ll @@ -4,7 +4,7 @@ define <2 x i64> @test(i64 %i) nounwind { ; CHECK-LABEL: test: ; CHECK: # BB#0: -; CHECK-NEXT: movd %rdi, %xmm0 +; CHECK-NEXT: movq %rdi, %xmm0 ; CHECK-NEXT: retq %tmp10 = insertelement <2 x i64> undef, i64 %i, i32 0 %tmp11 = insertelement <2 x i64> %tmp10, i64 0, i32 1 Index: llvm/trunk/test/CodeGen/X86/vec_set-C.ll 
=================================================================== --- llvm/trunk/test/CodeGen/X86/vec_set-C.ll +++ llvm/trunk/test/CodeGen/X86/vec_set-C.ll @@ -10,7 +10,7 @@ ; ; X64-LABEL: t1: ; X64: # BB#0: -; X64-NEXT: movd %rdi, %xmm0 +; X64-NEXT: movq %rdi, %xmm0 ; X64-NEXT: retq %tmp8 = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0 ret <2 x i64> %tmp8 Index: llvm/trunk/test/CodeGen/X86/vec_shift7.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_shift7.ll +++ llvm/trunk/test/CodeGen/X86/vec_shift7.ll @@ -17,7 +17,7 @@ ; ; X64-LABEL: test1: ; X64: # BB#0: # %entry -; X64-NEXT: movd %xmm0, %rax +; X64-NEXT: movq %xmm0, %rax ; X64-NEXT: retq entry: %c = shl <2 x i64> %a, Index: llvm/trunk/test/CodeGen/X86/vector-compare-all_of.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-compare-all_of.ll +++ llvm/trunk/test/CodeGen/X86/vector-compare-all_of.ll @@ -10,7 +10,7 @@ ; SSE-NEXT: cmpltpd %xmm0, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] ; SSE-NEXT: pand %xmm1, %xmm0 -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: retq ; ; AVX-LABEL: test_v2f64_sext: @@ -46,7 +46,7 @@ ; SSE-NEXT: andpd %xmm3, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] ; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: retq ; ; AVX-LABEL: test_v4f64_sext: @@ -285,7 +285,7 @@ ; SSE-NEXT: pcmpgtq %xmm1, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: retq ; ; AVX-LABEL: test_v2i64_sext: @@ -321,7 +321,7 @@ ; SSE-NEXT: pand %xmm1, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: retq ; ; AVX1-LABEL: test_v4i64_sext: Index: llvm/trunk/test/CodeGen/X86/vector-compare-any_of.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-compare-any_of.ll +++ llvm/trunk/test/CodeGen/X86/vector-compare-any_of.ll @@ -10,7 +10,7 @@ ; SSE-NEXT: cmpltpd %xmm0, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] ; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: retq ; ; AVX-LABEL: test_v2f64_sext: @@ -46,7 +46,7 @@ ; SSE-NEXT: orpd %xmm3, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] ; SSE-NEXT: por %xmm2, %xmm0 -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: retq ; ; AVX-LABEL: test_v4f64_sext: @@ -267,7 +267,7 @@ ; SSE-NEXT: pcmpgtq %xmm1, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: retq ; ; AVX-LABEL: test_v2i64_sext: @@ -303,7 +303,7 @@ ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: retq ; ; AVX1-LABEL: test_v4i64_sext: Index: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -11,22 +11,22 @@ define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind { ; SSE2-LABEL: test_div7_2i64: ; SSE2: # BB#0: -; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: movq 
%xmm0, %rax ; SSE2-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925 ; SSE2-NEXT: imulq %rcx ; SSE2-NEXT: movq %rdx, %rax ; SSE2-NEXT: shrq $63, %rax ; SSE2-NEXT: sarq %rdx ; SSE2-NEXT: addq %rax, %rdx -; SSE2-NEXT: movd %rdx, %xmm1 +; SSE2-NEXT: movq %rdx, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: movq %xmm0, %rax ; SSE2-NEXT: imulq %rcx ; SSE2-NEXT: movq %rdx, %rax ; SSE2-NEXT: shrq $63, %rax ; SSE2-NEXT: sarq %rdx ; SSE2-NEXT: addq %rax, %rdx -; SSE2-NEXT: movd %rdx, %xmm0 +; SSE2-NEXT: movq %rdx, %xmm0 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq @@ -40,14 +40,14 @@ ; SSE41-NEXT: shrq $63, %rax ; SSE41-NEXT: sarq %rdx ; SSE41-NEXT: addq %rax, %rdx -; SSE41-NEXT: movd %rdx, %xmm1 -; SSE41-NEXT: movd %xmm0, %rax +; SSE41-NEXT: movq %rdx, %xmm1 +; SSE41-NEXT: movq %xmm0, %rax ; SSE41-NEXT: imulq %rcx ; SSE41-NEXT: movq %rdx, %rax ; SSE41-NEXT: shrq $63, %rax ; SSE41-NEXT: sarq %rdx ; SSE41-NEXT: addq %rax, %rdx -; SSE41-NEXT: movd %rdx, %xmm0 +; SSE41-NEXT: movq %rdx, %xmm0 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE41-NEXT: retq ; @@ -275,7 +275,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind { ; SSE2-LABEL: test_rem7_2i64: ; SSE2: # BB#0: -; SSE2-NEXT: movd %xmm0, %rcx +; SSE2-NEXT: movq %xmm0, %rcx ; SSE2-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925 ; SSE2-NEXT: movq %rcx, %rax ; SSE2-NEXT: imulq %rsi @@ -286,9 +286,9 @@ ; SSE2-NEXT: leaq (,%rdx,8), %rax ; SSE2-NEXT: subq %rdx, %rax ; SSE2-NEXT: subq %rax, %rcx -; SSE2-NEXT: movd %rcx, %xmm1 +; SSE2-NEXT: movq %rcx, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rcx +; SSE2-NEXT: movq %xmm0, %rcx ; SSE2-NEXT: movq %rcx, %rax ; SSE2-NEXT: imulq %rsi ; SSE2-NEXT: movq %rdx, %rax @@ -298,7 +298,7 @@ ; SSE2-NEXT: leaq (,%rdx,8), %rax ; SSE2-NEXT: subq %rdx, %rax ; SSE2-NEXT: subq %rax, %rcx -; SSE2-NEXT: movd %rcx, %xmm0 +; SSE2-NEXT: movq %rcx, %xmm0 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq @@ -316,8 +316,8 @@ ; SSE41-NEXT: leaq (,%rdx,8), %rax ; SSE41-NEXT: subq %rdx, %rax ; SSE41-NEXT: subq %rax, %rcx -; SSE41-NEXT: movd %rcx, %xmm1 -; SSE41-NEXT: movd %xmm0, %rcx +; SSE41-NEXT: movq %rcx, %xmm1 +; SSE41-NEXT: movq %xmm0, %rcx ; SSE41-NEXT: movq %rcx, %rax ; SSE41-NEXT: imulq %rsi ; SSE41-NEXT: movq %rdx, %rax @@ -327,7 +327,7 @@ ; SSE41-NEXT: leaq (,%rdx,8), %rax ; SSE41-NEXT: subq %rdx, %rax ; SSE41-NEXT: subq %rax, %rcx -; SSE41-NEXT: movd %rcx, %xmm0 +; SSE41-NEXT: movq %rcx, %xmm0 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE41-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -11,7 +11,7 @@ define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind { ; SSE2-LABEL: test_div7_2i64: ; SSE2: # BB#0: -; SSE2-NEXT: movd %xmm0, %rcx +; SSE2-NEXT: movq %xmm0, %rcx ; SSE2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493 ; SSE2-NEXT: movq %rcx, %rax ; SSE2-NEXT: mulq %rsi @@ -19,16 +19,16 @@ ; SSE2-NEXT: shrq %rcx ; SSE2-NEXT: addq %rdx, %rcx ; SSE2-NEXT: shrq $2, %rcx -; SSE2-NEXT: movd %rcx, %xmm1 +; SSE2-NEXT: movq %rcx, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rcx +; 
SSE2-NEXT: movq %xmm0, %rcx ; SSE2-NEXT: movq %rcx, %rax ; SSE2-NEXT: mulq %rsi ; SSE2-NEXT: subq %rdx, %rcx ; SSE2-NEXT: shrq %rcx ; SSE2-NEXT: addq %rdx, %rcx ; SSE2-NEXT: shrq $2, %rcx -; SSE2-NEXT: movd %rcx, %xmm0 +; SSE2-NEXT: movq %rcx, %xmm0 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq @@ -43,15 +43,15 @@ ; SSE41-NEXT: shrq %rcx ; SSE41-NEXT: addq %rdx, %rcx ; SSE41-NEXT: shrq $2, %rcx -; SSE41-NEXT: movd %rcx, %xmm1 -; SSE41-NEXT: movd %xmm0, %rcx +; SSE41-NEXT: movq %rcx, %xmm1 +; SSE41-NEXT: movq %xmm0, %rcx ; SSE41-NEXT: movq %rcx, %rax ; SSE41-NEXT: mulq %rsi ; SSE41-NEXT: subq %rdx, %rcx ; SSE41-NEXT: shrq %rcx ; SSE41-NEXT: addq %rdx, %rcx ; SSE41-NEXT: shrq $2, %rcx -; SSE41-NEXT: movd %rcx, %xmm0 +; SSE41-NEXT: movq %rcx, %xmm0 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE41-NEXT: retq ; @@ -255,7 +255,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind { ; SSE2-LABEL: test_rem7_2i64: ; SSE2: # BB#0: -; SSE2-NEXT: movd %xmm0, %rcx +; SSE2-NEXT: movq %xmm0, %rcx ; SSE2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493 ; SSE2-NEXT: movq %rcx, %rax ; SSE2-NEXT: mulq %rsi @@ -267,9 +267,9 @@ ; SSE2-NEXT: leaq (,%rax,8), %rdx ; SSE2-NEXT: subq %rax, %rdx ; SSE2-NEXT: subq %rdx, %rcx -; SSE2-NEXT: movd %rcx, %xmm1 +; SSE2-NEXT: movq %rcx, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rcx +; SSE2-NEXT: movq %xmm0, %rcx ; SSE2-NEXT: movq %rcx, %rax ; SSE2-NEXT: mulq %rsi ; SSE2-NEXT: movq %rcx, %rax @@ -280,7 +280,7 @@ ; SSE2-NEXT: leaq (,%rax,8), %rdx ; SSE2-NEXT: subq %rax, %rdx ; SSE2-NEXT: subq %rdx, %rcx -; SSE2-NEXT: movd %rcx, %xmm0 +; SSE2-NEXT: movq %rcx, %xmm0 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq @@ -299,8 +299,8 @@ ; SSE41-NEXT: leaq (,%rax,8), %rdx ; SSE41-NEXT: subq %rax, %rdx ; SSE41-NEXT: subq %rdx, %rcx -; SSE41-NEXT: movd %rcx, %xmm1 -; SSE41-NEXT: movd %xmm0, %rcx +; SSE41-NEXT: movq %rcx, %xmm1 +; SSE41-NEXT: movq %xmm0, %rcx ; SSE41-NEXT: movq %rcx, %rax ; SSE41-NEXT: mulq %rsi ; SSE41-NEXT: movq %rcx, %rax @@ -311,7 +311,7 @@ ; SSE41-NEXT: leaq (,%rax,8), %rdx ; SSE41-NEXT: subq %rax, %rdx ; SSE41-NEXT: subq %rdx, %rcx -; SSE41-NEXT: movd %rcx, %xmm0 +; SSE41-NEXT: movq %rcx, %xmm0 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE41-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll @@ -1579,7 +1579,7 @@ ; SSE-LABEL: foldv2i64: ; SSE: # BB#0: ; SSE-NEXT: movl $55, %eax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: foldv2i64: @@ -1607,7 +1607,7 @@ ; SSE-LABEL: foldv2i64u: ; SSE: # BB#0: ; SSE-NEXT: movl $55, %eax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: foldv2i64u: Index: llvm/trunk/test/CodeGen/X86/vector-pcmp.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-pcmp.ll +++ llvm/trunk/test/CodeGen/X86/vector-pcmp.ll @@ -87,23 +87,23 @@ ; SSE2-LABEL: test_strange_type: ; SSE2: # BB#0: ; SSE2-NEXT: sarq $63, %rsi -; SSE2-NEXT: movd %rsi, %xmm0 +; SSE2-NEXT: movq %rsi, %xmm0 ; SSE2-NEXT: notq %rsi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 ; SSE2-NEXT: pxor %xmm0, 
%xmm1 -; SSE2-NEXT: movd %xmm1, %rax +; SSE2-NEXT: movq %xmm1, %rax ; SSE2-NEXT: movq %rsi, %rdx ; SSE2-NEXT: retq ; ; SSE42-LABEL: test_strange_type: ; SSE42: # BB#0: ; SSE42-NEXT: sarq $63, %rsi -; SSE42-NEXT: movd %rsi, %xmm0 +; SSE42-NEXT: movq %rsi, %xmm0 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE42-NEXT: pcmpeqd %xmm1, %xmm1 ; SSE42-NEXT: pxor %xmm0, %xmm1 -; SSE42-NEXT: movd %xmm1, %rax +; SSE42-NEXT: movq %xmm1, %rax ; SSE42-NEXT: pextrq $1, %xmm1, %rdx ; SSE42-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/vector-sext.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-sext.ll +++ llvm/trunk/test/CodeGen/X86/vector-sext.ll @@ -1207,10 +1207,10 @@ ; SSE-NEXT: movq %rax, %rcx ; SSE-NEXT: shlq $62, %rcx ; SSE-NEXT: sarq $63, %rcx -; SSE-NEXT: movd %rcx, %xmm1 +; SSE-NEXT: movq %rcx, %xmm1 ; SSE-NEXT: shlq $63, %rax ; SSE-NEXT: sarq $63, %rax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: retq ; @@ -1687,28 +1687,28 @@ ; SSE2-LABEL: load_sext_4i8_to_4i64: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movsbq 1(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm1 +; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: movsbq (%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm0 +; SSE2-NEXT: movq %rax, %xmm0 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: movsbq 3(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm2 +; SSE2-NEXT: movq %rax, %xmm2 ; SSE2-NEXT: movsbq 2(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm1 +; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: load_sext_4i8_to_4i64: ; SSSE3: # BB#0: # %entry ; SSSE3-NEXT: movsbq 1(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm1 +; SSSE3-NEXT: movq %rax, %xmm1 ; SSSE3-NEXT: movsbq (%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm0 +; SSSE3-NEXT: movq %rax, %xmm0 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: movsbq 3(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm2 +; SSSE3-NEXT: movq %rax, %xmm2 ; SSSE3-NEXT: movsbq 2(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm1 +; SSSE3-NEXT: movq %rax, %xmm1 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSSE3-NEXT: retq ; @@ -2038,48 +2038,48 @@ ; SSE2-LABEL: load_sext_8i8_to_8i64: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movsbq 1(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm1 +; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: movsbq (%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm0 +; SSE2-NEXT: movq %rax, %xmm0 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: movsbq 3(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm2 +; SSE2-NEXT: movq %rax, %xmm2 ; SSE2-NEXT: movsbq 2(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm1 +; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: movsbq 5(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm3 +; SSE2-NEXT: movq %rax, %xmm3 ; SSE2-NEXT: movsbq 4(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm2 +; SSE2-NEXT: movq %rax, %xmm2 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE2-NEXT: movsbq 7(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm4 +; SSE2-NEXT: movq %rax, %xmm4 ; SSE2-NEXT: movsbq 6(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm3 +; SSE2-NEXT: movq %rax, %xmm3 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: load_sext_8i8_to_8i64: ; SSSE3: # BB#0: # %entry ; SSSE3-NEXT: movsbq 1(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm1 +; SSSE3-NEXT: movq %rax, %xmm1 ; SSSE3-NEXT: movsbq (%rdi), 
%rax -; SSSE3-NEXT: movd %rax, %xmm0 +; SSSE3-NEXT: movq %rax, %xmm0 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: movsbq 3(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm2 +; SSSE3-NEXT: movq %rax, %xmm2 ; SSSE3-NEXT: movsbq 2(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm1 +; SSSE3-NEXT: movq %rax, %xmm1 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSSE3-NEXT: movsbq 5(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm3 +; SSSE3-NEXT: movq %rax, %xmm3 ; SSSE3-NEXT: movsbq 4(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm2 +; SSSE3-NEXT: movq %rax, %xmm2 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSSE3-NEXT: movsbq 7(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm4 +; SSSE3-NEXT: movq %rax, %xmm4 ; SSSE3-NEXT: movsbq 6(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm3 +; SSSE3-NEXT: movq %rax, %xmm3 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0] ; SSSE3-NEXT: retq ; @@ -4542,28 +4542,28 @@ ; SSE2-LABEL: load_sext_4i16_to_4i64: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movswq 2(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm1 +; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: movswq (%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm0 +; SSE2-NEXT: movq %rax, %xmm0 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: movswq 6(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm2 +; SSE2-NEXT: movq %rax, %xmm2 ; SSE2-NEXT: movswq 4(%rdi), %rax -; SSE2-NEXT: movd %rax, %xmm1 +; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: load_sext_4i16_to_4i64: ; SSSE3: # BB#0: # %entry ; SSSE3-NEXT: movswq 2(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm1 +; SSSE3-NEXT: movq %rax, %xmm1 ; SSSE3-NEXT: movswq (%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm0 +; SSSE3-NEXT: movq %rax, %xmm0 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: movswq 6(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm2 +; SSSE3-NEXT: movq %rax, %xmm2 ; SSSE3-NEXT: movswq 4(%rdi), %rax -; SSSE3-NEXT: movd %rax, %xmm1 +; SSSE3-NEXT: movq %rax, %xmm1 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSSE3-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll +++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll @@ -986,7 +986,7 @@ define <2 x i64> @insert_reg_and_zero_v2i64(i64 %a) { ; SSE-LABEL: insert_reg_and_zero_v2i64: ; SSE: # BB#0: -; SSE-NEXT: movd %rdi, %xmm0 +; SSE-NEXT: movq %rdi, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: insert_reg_and_zero_v2i64: @@ -1048,25 +1048,25 @@ define <2 x i64> @insert_reg_lo_v2i64(i64 %a, <2 x i64> %b) { ; SSE2-LABEL: insert_reg_lo_v2i64: ; SSE2: # BB#0: -; SSE2-NEXT: movd %rdi, %xmm1 +; SSE2-NEXT: movq %rdi, %xmm1 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq ; ; SSE3-LABEL: insert_reg_lo_v2i64: ; SSE3: # BB#0: -; SSE3-NEXT: movd %rdi, %xmm1 +; SSE3-NEXT: movq %rdi, %xmm1 ; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE3-NEXT: retq ; ; SSSE3-LABEL: insert_reg_lo_v2i64: ; SSSE3: # BB#0: -; SSSE3-NEXT: movd %rdi, %xmm1 +; SSSE3-NEXT: movq %rdi, %xmm1 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: insert_reg_lo_v2i64: ; SSE41: # BB#0: -; SSE41-NEXT: movd %rdi, %xmm1 +; SSE41-NEXT: movq %rdi, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7] ; SSE41-NEXT: retq ; @@ -1140,7 +1140,7 @@ define <2 x i64> @insert_reg_hi_v2i64(i64 %a, <2 x i64> %b) { ; SSE-LABEL: 
insert_reg_hi_v2i64: ; SSE: # BB#0: -; SSE-NEXT: movd %rdi, %xmm1 +; SSE-NEXT: movq %rdi, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll +++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll @@ -2104,25 +2104,25 @@ define <4 x i32> @insert_reg_lo_v4i32(i64 %a, <4 x i32> %b) { ; SSE2-LABEL: insert_reg_lo_v4i32: ; SSE2: # BB#0: -; SSE2-NEXT: movd %rdi, %xmm1 +; SSE2-NEXT: movq %rdi, %xmm1 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq ; ; SSE3-LABEL: insert_reg_lo_v4i32: ; SSE3: # BB#0: -; SSE3-NEXT: movd %rdi, %xmm1 +; SSE3-NEXT: movq %rdi, %xmm1 ; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE3-NEXT: retq ; ; SSSE3-LABEL: insert_reg_lo_v4i32: ; SSSE3: # BB#0: -; SSSE3-NEXT: movd %rdi, %xmm1 +; SSSE3-NEXT: movq %rdi, %xmm1 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: insert_reg_lo_v4i32: ; SSE41: # BB#0: -; SSE41-NEXT: movd %rdi, %xmm1 +; SSE41-NEXT: movq %rdi, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7] ; SSE41-NEXT: retq ; @@ -2191,7 +2191,7 @@ define <4 x i32> @insert_reg_hi_v4i32(i64 %a, <4 x i32> %b) { ; SSE-LABEL: insert_reg_hi_v4i32: ; SSE: # BB#0: -; SSE-NEXT: movd %rdi, %xmm1 +; SSE-NEXT: movq %rdi, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: retq ; Index: llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll +++ llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll @@ -1257,7 +1257,7 @@ ; SSE-LABEL: trunc_sub_const_v4i64_v4i32: ; SSE: # BB#0: ; SSE-NEXT: movl $1, %eax -; SSE-NEXT: movd %rax, %xmm2 +; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7] ; SSE-NEXT: psubq %xmm2, %xmm0 ; SSE-NEXT: psubq {{.*}}(%rip), %xmm1 @@ -1301,7 +1301,7 @@ ; SSE-LABEL: trunc_sub_const_v8i64_v8i16: ; SSE: # BB#0: ; SSE-NEXT: movl $1, %eax -; SSE-NEXT: movd %rax, %xmm4 +; SSE-NEXT: movq %rax, %xmm4 ; SSE-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7] ; SSE-NEXT: psubq %xmm4, %xmm0 ; SSE-NEXT: psubq {{.*}}(%rip), %xmm1 @@ -1418,7 +1418,7 @@ ; SSE-LABEL: trunc_sub_const_v16i64_v16i8: ; SSE: # BB#0: ; SSE-NEXT: movl $1, %eax -; SSE-NEXT: movd %rax, %xmm8 +; SSE-NEXT: movq %rax, %xmm8 ; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7] ; SSE-NEXT: psubq %xmm8, %xmm0 ; SSE-NEXT: psubq {{.*}}(%rip), %xmm1 @@ -2411,7 +2411,7 @@ ; SSE-NEXT: psllq $32, %xmm1 ; SSE-NEXT: paddq %xmm3, %xmm1 ; SSE-NEXT: movl $1, %eax -; SSE-NEXT: movd %rax, %xmm2 +; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7] ; SSE-NEXT: movdqa %xmm0, %xmm3 ; SSE-NEXT: pmuludq %xmm2, %xmm3 @@ -2554,7 +2554,7 @@ ; SSE-LABEL: trunc_mul_const_v16i64_v16i8: ; SSE: # BB#0: ; SSE-NEXT: movl $1, %eax -; SSE-NEXT: movd %rax, %xmm8 +; SSE-NEXT: movq %rax, %xmm8 ; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7] ; SSE-NEXT: movdqa %xmm0, %xmm9 ; SSE-NEXT: pmuludq %xmm8, %xmm9 Index: llvm/trunk/test/CodeGen/X86/vector-trunc.ll =================================================================== --- 
llvm/trunk/test/CodeGen/X86/vector-trunc.ll +++ llvm/trunk/test/CodeGen/X86/vector-trunc.ll @@ -906,7 +906,7 @@ ; SSE-LABEL: trunc2i64_i64: ; SSE: # BB#0: # %entry ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movd %xmm0, %rax +; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: retq ; ; AVX-LABEL: trunc2i64_i64: @@ -1031,19 +1031,19 @@ ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: movq %xmm0, %rax ; SSE2-NEXT: retq ; ; SSSE3-LABEL: trunc4i32_i64: ; SSSE3: # BB#0: # %entry ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; SSSE3-NEXT: movd %xmm0, %rax +; SSSE3-NEXT: movq %xmm0, %rax ; SSSE3-NEXT: retq ; ; SSE41-LABEL: trunc4i32_i64: ; SSE41: # BB#0: # %entry ; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; SSE41-NEXT: movd %xmm0, %rax +; SSE41-NEXT: movq %xmm0, %rax ; SSE41-NEXT: retq ; ; AVX-LABEL: trunc4i32_i64: @@ -1158,19 +1158,19 @@ ; SSE2: # BB#0: # %entry ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: packuswb %xmm0, %xmm0 -; SSE2-NEXT: movd %xmm0, %rax +; SSE2-NEXT: movq %xmm0, %rax ; SSE2-NEXT: retq ; ; SSSE3-LABEL: trunc8i16_i64: ; SSSE3: # BB#0: # %entry ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] -; SSSE3-NEXT: movd %xmm0, %rax +; SSSE3-NEXT: movq %xmm0, %rax ; SSSE3-NEXT: retq ; ; SSE41-LABEL: trunc8i16_i64: ; SSE41: # BB#0: # %entry ; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] -; SSE41-NEXT: movd %xmm0, %rax +; SSE41-NEXT: movq %xmm0, %rax ; SSE41-NEXT: retq ; ; AVX-LABEL: trunc8i16_i64: Index: llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll +++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll @@ -1249,7 +1249,7 @@ ; SSE-LABEL: foldv2i64: ; SSE: # BB#0: ; SSE-NEXT: movl $8, %eax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: foldv2i64: @@ -1271,7 +1271,7 @@ ; SSE-LABEL: foldv2i64u: ; SSE: # BB#0: ; SSE-NEXT: movl $8, %eax -; SSE-NEXT: movd %rax, %xmm0 +; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: foldv2i64u: Index: llvm/trunk/test/CodeGen/X86/vmovq.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vmovq.ll +++ llvm/trunk/test/CodeGen/X86/vmovq.ll @@ -6,7 +6,7 @@ ; SSE-LABEL: PR25554: ; SSE: # BB#0: ; SSE-NEXT: movl $1, %eax -; SSE-NEXT: movd %rax, %xmm1 +; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7] ; SSE-NEXT: paddq %xmm1, %xmm0 Index: llvm/trunk/test/CodeGen/X86/vshift-1.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vshift-1.ll +++ llvm/trunk/test/CodeGen/X86/vshift-1.ll @@ -39,7 +39,7 @@ ; ; X64-LABEL: shift1b: ; X64: # BB#0: # %entry -; X64-NEXT: movd %rsi, %xmm1 +; X64-NEXT: movq %rsi, %xmm1 ; X64-NEXT: psllq %xmm1, %xmm0 ; X64-NEXT: movdqa %xmm0, (%rdi) ; X64-NEXT: retq Index: llvm/trunk/test/CodeGen/X86/vshift-2.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vshift-2.ll +++ llvm/trunk/test/CodeGen/X86/vshift-2.ll @@ -39,7 +39,7 @@ ; ; X64-LABEL: shift1b: ; X64: # BB#0: # %entry -; X64-NEXT: movd %rsi, %xmm1 +; 
X64-NEXT: movq %rsi, %xmm1 ; X64-NEXT: psrlq %xmm1, %xmm0 ; X64-NEXT: movdqa %xmm0, (%rdi) ; X64-NEXT: retq Index: llvm/trunk/test/CodeGen/X86/vsplit-and.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vsplit-and.ll +++ llvm/trunk/test/CodeGen/X86/vsplit-and.ll @@ -23,13 +23,13 @@ define void @t2(<3 x i64>* %dst, <3 x i64> %src1, <3 x i64> %src2) nounwind readonly { ; CHECK-LABEL: t2: ; CHECK: # BB#0: -; CHECK-NEXT: movd %r9, %xmm1 -; CHECK-NEXT: movd %r8, %xmm0 +; CHECK-NEXT: movq %r9, %xmm1 +; CHECK-NEXT: movq %r8, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; CHECK-NEXT: movd %rdx, %xmm2 -; CHECK-NEXT: movd %rsi, %xmm1 +; CHECK-NEXT: movq %rdx, %xmm2 +; CHECK-NEXT: movq %rsi, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; CHECK-NEXT: movd %rcx, %xmm2 +; CHECK-NEXT: movq %rcx, %xmm2 ; CHECK-NEXT: movq {{.*#+}} xmm3 = mem[0],zero ; CHECK-NEXT: pxor %xmm4, %xmm4 ; CHECK-NEXT: pcmpeqq %xmm4, %xmm2 Index: llvm/trunk/test/CodeGen/X86/widen_cast-5.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/widen_cast-5.ll +++ llvm/trunk/test/CodeGen/X86/widen_cast-5.ll @@ -16,7 +16,7 @@ ; ; X64-LABEL: convert: ; X64: ## BB#0: ## %entry -; X64-NEXT: movd %rsi, %xmm0 +; X64-NEXT: movq %rsi, %xmm0 ; X64-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero ; X64-NEXT: pxor {{.*}}(%rip), %xmm0 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] Index: llvm/trunk/test/CodeGen/X86/widen_conv-3.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/widen_conv-3.ll +++ llvm/trunk/test/CodeGen/X86/widen_conv-3.ll @@ -105,7 +105,7 @@ ; X64-SSE2-LABEL: convert_v3i8_to_v3f32: ; X64-SSE2: # BB#0: # %entry ; X64-SSE2-NEXT: movzwl (%rsi), %eax -; X64-SSE2-NEXT: movd %rax, %xmm0 +; X64-SSE2-NEXT: movq %rax, %xmm0 ; X64-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X64-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; X64-SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) @@ -129,7 +129,7 @@ ; X64-SSE42: # BB#0: # %entry ; X64-SSE42-NEXT: movzbl 2(%rsi), %eax ; X64-SSE42-NEXT: movzwl (%rsi), %ecx -; X64-SSE42-NEXT: movd %rcx, %xmm0 +; X64-SSE42-NEXT: movq %rcx, %xmm0 ; X64-SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; X64-SSE42-NEXT: pinsrd $2, %eax, %xmm0 ; X64-SSE42-NEXT: pslld $24, %xmm0 Index: llvm/trunk/test/CodeGen/X86/widen_conv-4.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/widen_conv-4.ll +++ llvm/trunk/test/CodeGen/X86/widen_conv-4.ll @@ -130,7 +130,7 @@ ; X64-SSE2-LABEL: convert_v3i8_to_v3f32: ; X64-SSE2: # BB#0: # %entry ; X64-SSE2-NEXT: movzwl (%rsi), %eax -; X64-SSE2-NEXT: movd %rax, %xmm0 +; X64-SSE2-NEXT: movq %rax, %xmm0 ; X64-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X64-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; X64-SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) @@ -154,7 +154,7 @@ ; X64-SSE42: # BB#0: # %entry ; X64-SSE42-NEXT: movzbl 2(%rsi), %eax ; X64-SSE42-NEXT: movzwl (%rsi), %ecx -; X64-SSE42-NEXT: movd %rcx, %xmm0 +; X64-SSE42-NEXT: movq %rcx, %xmm0 ; X64-SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; X64-SSE42-NEXT: pinsrd $2, %eax, %xmm0 ; X64-SSE42-NEXT: pand {{.*}}(%rip), %xmm0 Index: 
llvm/trunk/test/MC/Disassembler/X86/x86-64.txt =================================================================== --- llvm/trunk/test/MC/Disassembler/X86/x86-64.txt +++ llvm/trunk/test/MC/Disassembler/X86/x86-64.txt @@ -248,16 +248,16 @@ # CHECK: movd %mm0, %rax 0x48 0x0f 0x7e 0xc0 -# CHECK: movd (%rax), %xmm0 +# CHECK: movq (%rax), %xmm0 0x66 0x48 0x0f 0x6e 0x00 -# CHECK: movd %rax, %xmm0 +# CHECK: movq %rax, %xmm0 0x66 0x48 0x0f 0x6e 0xc0 -# CHECK: movd %xmm0, (%rax) +# CHECK: movq %xmm0, (%rax) 0x66 0x48 0x0f 0x7e 0x00 -# CHECK: movd %xmm0, %rax +# CHECK: movq %xmm0, %rax 0x66 0x48 0x0f 0x7e 0xc0 # CHECK: pextrw $3, %xmm3, %ecx Index: llvm/trunk/test/MC/X86/x86-64.s =================================================================== --- llvm/trunk/test/MC/X86/x86-64.s +++ llvm/trunk/test/MC/X86/x86-64.s @@ -1297,17 +1297,13 @@ // CHECK: encoding: [0x48,0x0f,0x00,0xc8] str %rax -// CHECK: movd %rdi, %xmm0 +// CHECK: movq %rdi, %xmm0 // CHECK: encoding: [0x66,0x48,0x0f,0x6e,0xc7] movq %rdi,%xmm0 -// CHECK: movd %rdi, %xmm0 -// CHECK: encoding: [0x66,0x48,0x0f,0x6e,0xc7] - movd %rdi,%xmm0 - -// CHECK: movd %xmm0, %rax +// CHECK: movq %xmm0, %rax // CHECK: encoding: [0x66,0x48,0x0f,0x7e,0xc0] - movd %xmm0, %rax + movq %xmm0, %rax // CHECK: movntil %eax, (%rdi) // CHECK: encoding: [0x0f,0xc3,0x07] @@ -1470,13 +1466,13 @@ fdivr %st(1) // CHECK: movd %xmm0, %eax -// CHECK: movd %xmm0, %rax -// CHECK: movd %xmm0, %rax +// CHECK: movq %xmm0, %rax +// CHECK: movq %xmm0, %rax // CHECK: vmovd %xmm0, %eax // CHECK: vmovq %xmm0, %rax // CHECK: vmovq %xmm0, %rax movd %xmm0, %eax -movd %xmm0, %rax +movq %xmm0, %rax movq %xmm0, %rax vmovd %xmm0, %eax vmovd %xmm0, %rax
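For reference, the moves rewritten throughout these tests are the 66+REX.W encodings 0F 6E (GPR to XMM) and 0F 7E (XMM to GPR). A minimal sanity check of the new printing, assuming an llvm-mc built with this patch is on PATH (boilerplate such as the .text directive trimmed from the output):

  $ printf 'movd %%rax, %%xmm0\nmovd %%xmm0, %%rcx\n' | llvm-mc -triple=x86_64 -show-encoding
        movq    %rax, %xmm0             # encoding: [0x66,0x48,0x0f,0x6e,0xc0]
        movq    %xmm0, %rcx             # encoding: [0x66,0x48,0x0f,0x7e,0xc1]

The movd spellings on input still assemble to the same bytes as before; only the printed mnemonic changes, matching the encodings checked in test/MC/X86/x86-64.s above.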