Index: lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- lib/Target/X86/X86InstrInfo.cpp
+++ lib/Target/X86/X86InstrInfo.cpp
@@ -8468,6 +8468,8 @@
   { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
   { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
   { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr },
+  { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm },
+  { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm },
   { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
   { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
   { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
@@ -8484,6 +8486,8 @@
   { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
   { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
   { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr },
+  { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm },
+  { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm },
   { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
   { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
   { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
@@ -8560,6 +8564,8 @@
   { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr },
   { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr },
   { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm },
+  { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm, X86::VMOVQI2PQIZrm, },
+  { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm, X86::VMOVDI2PDIZrm, },
 };
 
 static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
Index: test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
===================================================================
--- test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
+++ test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
 
-; CHECK: xorpd {{.*}}{{LCPI0_0|__xmm@}}
+; CHECK: xorps {{.*}}{{LCPI0_0|__xmm@}}
 define void @casin({ double, double }* sret %agg.result, double %z.0, double %z.1) nounwind {
 entry:
 %memtmp = alloca { double, double }, align 8 ; <{ double, double }*> [#uses=3]
Index: test/CodeGen/X86/2012-1-10-buildvector.ll
===================================================================
--- test/CodeGen/X86/2012-1-10-buildvector.ll
+++ test/CodeGen/X86/2012-1-10-buildvector.ll
@@ -18,8 +18,8 @@
 define void @bad_insert(i32 %t) {
 ; CHECK-LABEL: bad_insert:
 ; CHECK: # BB#0:
-; CHECK-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovdqa %ymm0, (%eax)
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovaps %ymm0, (%eax)
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retl
 %v2 = insertelement <8 x i32> zeroinitializer, i32 %t, i32 0
Index: test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
===================================================================
--- test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -403,7 +403,7 @@
 ; CHECK-LABEL: test_x86_sse2_storeu_pd:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
 ; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT: vmovupd %xmm0, (%eax)
Index: test/CodeGen/X86/avx-shuffle-x86_32.ll
===================================================================
--- test/CodeGen/X86/avx-shuffle-x86_32.ll
+++ test/CodeGen/X86/avx-shuffle-x86_32.ll
@@ -16,7 +16,7 @@
 ; CHECK-LABEL: test2:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT: retl
 %v9 = load <4 x i16>, <4 x i16> * %v, align 8
Index: test/CodeGen/X86/avx2-vbroadcast.ll
===================================================================
--- test/CodeGen/X86/avx2-vbroadcast.ll
+++ test/CodeGen/X86/avx2-vbroadcast.ll
@@ -279,7 +279,7 @@
 ; X32-AVX2-LABEL: broadcast_mem_v4i16_v16i16:
 ; X32-AVX2: ## BB#0:
 ; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,4,5,6,7,6,7],zero,zero
 ; X32-AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
 ; X32-AVX2-NEXT: retl
Index: test/CodeGen/X86/avx512-mov.ll
===================================================================
--- test/CodeGen/X86/avx512-mov.ll
+++ test/CodeGen/X86/avx512-mov.ll
@@ -31,7 +31,7 @@
 define <4 x i32> @test4(i32* %x) {
 ; CHECK-LABEL: test4:
 ; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x6e,0x07]
+; CHECK-NEXT: vmovss (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x10,0x07]
 ; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %y = load i32, i32* %x
@@ -89,7 +89,7 @@
 define <4 x i32> @test10(i32* %x) {
 ; CHECK-LABEL: test10:
 ; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x6e,0x07]
+; CHECK-NEXT: vmovss (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x10,0x07]
 ; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %y = load i32, i32* %x, align 4
@@ -140,7 +140,7 @@
 define <4 x i32> @test15(i32* %x) {
 ; CHECK-LABEL: test15:
 ; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x6e,0x07]
+; CHECK-NEXT: vmovss (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x10,0x07]
 ; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %y = load i32, i32* %x, align 4
Index: test/CodeGen/X86/fp-logic.ll
===================================================================
--- test/CodeGen/X86/fp-logic.ll
+++ test/CodeGen/X86/fp-logic.ll
@@ -231,7 +231,7 @@
 ; CHECK-LABEL: f7_double:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: andpd %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm1, %xmm0
 ; CHECK-NEXT: retq
 ;
 %bc1 = bitcast double %x to i64
Index: test/CodeGen/X86/fp128-cast.ll
===================================================================
--- test/CodeGen/X86/fp128-cast.ll
+++ test/CodeGen/X86/fp128-cast.ll
@@ -46,7 +46,7 @@
 
 ; X64-LABEL: TestFPExtF64_F128:
 ; X64: movsd vf64(%rip), %xmm0
 ; X64-NEXT: callq __extenddftf2
-; X64-NEXT: movapd %xmm0, vf128(%rip)
+; X64-NEXT: movaps %xmm0, vf128(%rip)
 ; X64: ret
 }
Index: test/CodeGen/X86/i64-mem-copy.ll
===================================================================
--- test/CodeGen/X86/i64-mem-copy.ll
+++ test/CodeGen/X86/i64-mem-copy.ll
@@ -69,6 +69,7 @@
 define void @PR23476(<5 x i64> %in, i64* %out, i32 %index) {
 ; X32-LABEL: PR23476:
 ; X32: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: movsd %xmm0, (%eax)
 %ext = extractelement <5 x i64> %in, i32 %index
 store i64 %ext, i64* %out, align 8
Index: test/CodeGen/X86/logical-load-fold.ll
===================================================================
--- test/CodeGen/X86/logical-load-fold.ll
+++ test/CodeGen/X86/logical-load-fold.ll
@@ -1,27 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
 
-; Although we have the ability to fold an unaligned load with AVX 
+; Although we have the ability to fold an unaligned load with AVX
 ; and under special conditions with some SSE implementations, we
 ; can not fold the load under any circumstances in these test
 ; cases because they are not 16-byte loads. The load must be
 ; executed as a scalar ('movs*') with a zero extension to
-; 128-bits and then used in the packed logical ('andp*') op. 
+; 128-bits and then used in the packed logical ('andp*') op.
 
 ; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371
 
 define double @load_double_no_fold(double %x, double %y) {
 ; SSE2-LABEL: load_double_no_fold:
-; SSE2: BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: cmplesd %xmm0, %xmm1
 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: andpd %xmm1, %xmm0
+; SSE2-NEXT: andps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; AVX-LABEL: load_double_no_fold:
-; AVX: BB#0:
+; AVX: # BB#0:
 ; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 %cmp = fcmp oge double %x, %y
@@ -32,14 +33,14 @@
 
 define float @load_float_no_fold(float %x, float %y) {
 ; SSE2-LABEL: load_float_no_fold:
-; SSE2: BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: cmpless %xmm0, %xmm1
 ; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT: andps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; AVX-LABEL: load_float_no_fold:
-; AVX: BB#0:
+; AVX: # BB#0:
 ; AVX-NEXT: vcmpless %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
Index: test/CodeGen/X86/merge-consecutive-loads-128.ll
===================================================================
--- test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -416,12 +416,12 @@
 define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_3zuu:
 ; SSE: # BB#0:
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: merge_4i32_i32_3zuu:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT: retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_3zuu:
@@ -436,7 +436,7 @@
 ; X32-SSE41-LABEL: merge_4i32_i32_3zuu:
 ; X32-SSE41: # BB#0:
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT: retl
 %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 3
 %val0 = load i32, i32* %ptr0
@@ -448,12 +448,12 @@
 define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_34uu:
 ; SSE: # BB#0:
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: merge_4i32_i32_34uu:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_34uu:
@@ -469,7 +469,7 @@
 ; X32-SSE41-LABEL: merge_4i32_i32_34uu:
 ; X32-SSE41: # BB#0:
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT: retl
 %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 3
 %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 4
@@ -483,12 +483,12 @@
 define <4 x i32> @merge_4i32_i32_45zz(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_45zz:
 ; SSE: # BB#0:
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: merge_4i32_i32_45zz:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_45zz:
@@ -506,7 +506,7 @@
 ; X32-SSE41-LABEL: merge_4i32_i32_45zz:
 ; X32-SSE41: # BB#0:
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT: retl
 %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 4
 %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 5
@@ -599,12 +599,12 @@
 define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_8i16_i16_34uuuuuu:
 ; SSE: # BB#0:
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: merge_8i16_i16_34uuuuuu:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT: retq
 ;
 ; X32-SSE1-LABEL: merge_8i16_i16_34uuuuuu:
@@ -620,7 +620,7 @@
 ; X32-SSE41-LABEL: merge_8i16_i16_34uuuuuu:
 ; X32-SSE41: # BB#0:
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT: retl
 %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 3
 %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 4
@@ -634,12 +634,12 @@
 define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_8i16_i16_45u7zzzz:
 ; SSE: # BB#0:
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: merge_8i16_i16_45u7zzzz:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: retq
 ;
 ; X32-SSE1-LABEL: merge_8i16_i16_45u7zzzz:
@@ -667,7 +667,7 @@
 ; X32-SSE41-LABEL: merge_8i16_i16_45u7zzzz:
 ; X32-SSE41: # BB#0:
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT: retl
 %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
 %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
@@ -811,12 +811,12 @@
 define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
 ; SSE: # BB#0:
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT: retq
 ;
 ; X32-SSE1-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
@@ -839,7 +839,7 @@
 ; X32-SSE41-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
 ; X32-SSE41: # BB#0:
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT: retl
 %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 0
 %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 1
@@ -861,12 +861,12 @@
 define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
 ; SSE: # BB#0:
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: retq
 ;
 ; X32-SSE1-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
@@ -905,7 +905,7 @@
 ; X32-SSE41-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
 ; X32-SSE41: # BB#0:
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT: retl
 %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 0
 %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 1
@@ -934,14 +934,14 @@
 define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
 ; SSE-LABEL: merge_4i32_i32_combine:
 ; SSE: # BB#0:
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT: movdqa %xmm0, (%rdi)
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movaps %xmm0, (%rdi)
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: merge_4i32_i32_combine:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovdqa %xmm0, (%rdi)
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovaps %xmm0, (%rdi)
 ; AVX-NEXT: retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_combine:
@@ -959,8 +959,8 @@
 ; X32-SSE41: # BB#0:
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT: movdqa %xmm0, (%eax)
+; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE41-NEXT: movaps %xmm0, (%eax)
 ; X32-SSE41-NEXT: retl
 %1 = getelementptr i32, i32* %src, i32 0
 %2 = load i32, i32* %1
Index: test/CodeGen/X86/merge-consecutive-loads-256.ll
===================================================================
--- test/CodeGen/X86/merge-consecutive-loads-256.ll
+++ test/CodeGen/X86/merge-consecutive-loads-256.ll
@@ -210,13 +210,13 @@
 define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4i64_i64_1zzu:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: retq
 ;
 ; X32-AVX-LABEL: merge_4i64_i64_1zzu:
 ; X32-AVX: # BB#0:
 ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT: retl
 %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
 %val0 = load i64, i64* %ptr0
@@ -385,32 +385,18 @@
 }
 
 define <8 x i32> @merge_8i32_i32_56zz9uzz(i32* %ptr) nounwind uwtable noinline ssp {
-; AVX1-LABEL: merge_8i32_i32_56zz9uzz:
-; AVX1: # BB#0:
-; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: merge_8i32_i32_56zz9uzz:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: merge_8i32_i32_56zz9uzz:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512F-NEXT: retq
+; AVX-LABEL: merge_8i32_i32_56zz9uzz:
+; AVX: # BB#0:
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
 ;
 ; X32-AVX-LABEL: merge_8i32_i32_56zz9uzz:
 ; X32-AVX: # BB#0:
 ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-AVX-NEXT: retl
 %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 5
@@ -473,13 +459,13 @@
 define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT: retq
 ;
 ; X32-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
 ; X32-AVX: # BB#0:
 ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX-NEXT: retl
 %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 8
 %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 9
@@ -497,13 +483,13 @@
 define <16 x i16> @merge_16i16_i16_45u7uuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: retq
 ;
 ; X32-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
 ; X32-AVX: # BB#0:
 ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT: retl
 %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
 %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
@@ -583,13 +569,13 @@
 define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT: retq
 ;
 ; X32-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
 ; X32-AVX: # BB#0:
 ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX-NEXT: retl
 %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 4
 %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 5
@@ -606,13 +592,13 @@
 define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT: retq
 ;
 ; X32-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
 ; X32-AVX: # BB#0:
 ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX-NEXT: retl
 %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 2
 %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 3
Index: test/CodeGen/X86/merge-consecutive-loads-512.ll
===================================================================
--- test/CodeGen/X86/merge-consecutive-loads-512.ll
+++ test/CodeGen/X86/merge-consecutive-loads-512.ll
@@ -372,13 +372,13 @@
 define <16 x i32> @merge_16i32_i32_12zzzuuuuuuuuuuuz(i32* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
 ; ALL: # BB#0:
-; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT: retq
 ;
 ; X32-AVX512F-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
 ; X32-AVX512F: # BB#0:
 ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT: retl
 %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
 %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 2
@@ -486,19 +486,19 @@
 define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; AVX512F-NEXT: retq
 ;
 ; AVX512BW-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512BW-NEXT: retq
 ;
 ; X32-AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
 ; X32-AVX512F: # BB#0:
 ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; X32-AVX512F-NEXT: retl
 %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 1
@@ -518,13 +518,13 @@
 define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
 ; ALL: # BB#0:
-; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT: retq
 ;
 ; X32-AVX512F-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
 ; X32-AVX512F: # BB#0:
 ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT: retl
 %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
 %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
@@ -541,19 +541,19 @@
 define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; AVX512F-NEXT: retq
 ;
 ; AVX512BW-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BW-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512BW-NEXT: retq
 ;
 ; X32-AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
 ; X32-AVX512F: # BB#0:
 ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; X32-AVX512F-NEXT: retl
 %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 2
@@ -573,19 +573,19 @@
 define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; AVX512F-NEXT: retq
 ;
 ; AVX512BW-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512BW-NEXT: retq
 ;
 ; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
 ; X32-AVX512F: # BB#0:
 ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; X32-AVX512F-NEXT: retl
 %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
@@ -611,19 +611,19 @@
 define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; AVX512F-NEXT: retq
 ;
 ; AVX512BW-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BW-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512BW-NEXT: retq
 ;
 ; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
 ; X32-AVX512F: # BB#0:
 ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; X32-AVX512F-NEXT: retl
 %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
Index: test/CodeGen/X86/mmx-arg-passing-x86-64.ll
===================================================================
--- test/CodeGen/X86/mmx-arg-passing-x86-64.ll
+++ test/CodeGen/X86/mmx-arg-passing-x86-64.ll
@@ -10,7 +10,7 @@
 ; X86-64-LABEL: t3:
 ; X86-64: ## BB#0:
 ; X86-64-NEXT: movq _g_v8qi@{{.*}}(%rip), %rax
-; X86-64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-64-NEXT: movb $1, %al
 ; X86-64-NEXT: jmp _pass_v8qi ## TAILCALL
 %tmp3 = load <8 x i8>, <8 x i8>* @g_v8qi, align 8
Index: test/CodeGen/X86/pr11334.ll
===================================================================
--- test/CodeGen/X86/pr11334.ll
+++ test/CodeGen/X86/pr11334.ll
@@ -85,14 +85,14 @@
 define void @test_vector_creation() nounwind {
 ; SSE-LABEL: test_vector_creation:
 ; SSE: # BB#0:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
 ; SSE-NEXT: movdqa %xmm0, (%rax)
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: test_vector_creation:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX-NEXT: vmovaps %ymm0, (%rax)
Index: test/CodeGen/X86/pr2656.ll
===================================================================
--- test/CodeGen/X86/pr2656.ll
+++ test/CodeGen/X86/pr2656.ll
@@ -41,8 +41,8 @@
 define double @PR22371(double %x) {
 ; CHECK-LABEL: PR22371:
 ; CHECK: movsd 16(%esp), %xmm0
-; CHECK-NEXT: andpd LCPI1_0, %xmm0
-; CHECK-NEXT: movlpd %xmm0, (%esp)
+; CHECK-NEXT: andps LCPI1_0, %xmm0
+; CHECK-NEXT: movlps %xmm0, (%esp)
 %call = tail call double @fabs(double %x) #0
 ret double %call
 }
Index: test/CodeGen/X86/scalar-int-to-fp.ll
===================================================================
--- test/CodeGen/X86/scalar-int-to-fp.ll
+++ test/CodeGen/X86/scalar-int-to-fp.ll
@@ -74,14 +74,14 @@
 }
 
 ; CHECK-LABEL: u64_to_f
-; AVX512_32: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512_32: vmovq %xmm0, {{[0-9]+}}(%esp)
+; AVX512_32: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512_32: vmovlps %xmm0, {{[0-9]+}}(%esp)
 ; AVX512_32: fildll
 
 ; AVX512_64: vcvtusi2ssq
 
-; SSE2_32: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2_32: movq %xmm0, {{[0-9]+}}(%esp)
+; SSE2_32: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2_32: movlps %xmm0, {{[0-9]+}}(%esp)
 ; SSE2_32: fildll
 
 ; SSE2_64: cvtsi2ssq
Index: test/CodeGen/X86/sse-fcopysign.ll
===================================================================
--- test/CodeGen/X86/sse-fcopysign.ll
+++ test/CodeGen/X86/sse-fcopysign.ll
@@ -94,11 +94,11 @@
 ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT: addss 20(%ebp), %xmm0
 ; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: andpd {{\.LCPI.*}}, %xmm1
+; X32-NEXT: andps {{\.LCPI.*}}, %xmm1
 ; X32-NEXT: cvtss2sd %xmm0, %xmm0
-; X32-NEXT: andpd {{\.LCPI.*}}, %xmm0
-; X32-NEXT: orpd %xmm1, %xmm0
-; X32-NEXT: movlpd %xmm0, (%esp)
+; X32-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-NEXT: orps %xmm1, %xmm0
+; X32-NEXT: movlps %xmm0, (%esp)
 ; X32-NEXT: fldl (%esp)
 ; X32-NEXT: movl %ebp, %esp
 ; X32-NEXT: popl %ebp
Index: test/CodeGen/X86/sse-minmax.ll
===================================================================
--- test/CodeGen/X86/sse-minmax.ll
+++ test/CodeGen/X86/sse-minmax.ll
@@ -779,11 +779,11 @@
 ; STRICT-LABEL: oge_y:
 ; STRICT: # BB#0:
 ; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; STRICT-NEXT: movapd %xmm1, %xmm2
+; STRICT-NEXT: movaps %xmm1, %xmm2
 ; STRICT-NEXT: cmplesd %xmm0, %xmm2
-; STRICT-NEXT: andpd %xmm2, %xmm0
-; STRICT-NEXT: andnpd %xmm1, %xmm2
-; STRICT-NEXT: orpd %xmm2, %xmm0
+; STRICT-NEXT: andps %xmm2, %xmm0
+; STRICT-NEXT: andnps %xmm1, %xmm2
+; STRICT-NEXT: orps %xmm2, %xmm0
 ; STRICT-NEXT: retq
 ;
 ; RELAX-LABEL: oge_y:
@@ -800,12 +800,12 @@
 ; STRICT-LABEL: ole_y:
 ; STRICT: # BB#0:
 ; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movapd %xmm0, %xmm1
+; STRICT-NEXT: movaps %xmm0, %xmm1
 ; STRICT-NEXT: cmplesd %xmm2, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm0
-; STRICT-NEXT: andnpd %xmm2, %xmm1
-; STRICT-NEXT: orpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
+; STRICT-NEXT: andps %xmm1, %xmm0
+; STRICT-NEXT: andnps %xmm2, %xmm1
+; STRICT-NEXT: orps %xmm0, %xmm1
+; STRICT-NEXT: movaps %xmm1, %xmm0
 ; STRICT-NEXT: retq
 ;
 ; RELAX-LABEL: ole_y:
@@ -822,12 +822,12 @@
 ; STRICT-LABEL: oge_inverse_y:
 ; STRICT: # BB#0:
 ; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movapd %xmm2, %xmm1
+; STRICT-NEXT: movaps %xmm2, %xmm1
 ; STRICT-NEXT: cmplesd %xmm0, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm2
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: orpd %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
+; STRICT-NEXT: andps %xmm1, %xmm2
+; STRICT-NEXT: andnps %xmm0, %xmm1
+; STRICT-NEXT: orps %xmm2, %xmm1
+; STRICT-NEXT: movaps %xmm1, %xmm0
 ; STRICT-NEXT: retq
 ;
 ; UNSAFE-LABEL: oge_inverse_y:
@@ -851,12 +851,12 @@
 ; STRICT-LABEL: ole_inverse_y:
 ; STRICT: # BB#0:
 ; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movapd %xmm0, %xmm1
+; STRICT-NEXT: movaps %xmm0, %xmm1
 ; STRICT-NEXT: cmplesd %xmm2, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm2
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: orpd %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
+; STRICT-NEXT: andps %xmm1, %xmm2
+; STRICT-NEXT: andnps %xmm0, %xmm1
+; STRICT-NEXT: orps %xmm2, %xmm1
+; STRICT-NEXT: movaps %xmm1, %xmm0
 ; STRICT-NEXT: retq
 ;
 ; UNSAFE-LABEL: ole_inverse_y:
@@ -880,12 +880,12 @@
 ; STRICT-LABEL: ugt_y:
 ; STRICT: # BB#0:
 ; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movapd %xmm0, %xmm1
+; STRICT-NEXT: movaps %xmm0, %xmm1
 ; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm0
-; STRICT-NEXT: andnpd %xmm2, %xmm1
-; STRICT-NEXT: orpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
+; STRICT-NEXT: andps %xmm1, %xmm0
+; STRICT-NEXT: andnps %xmm2, %xmm1
+; STRICT-NEXT: orps %xmm0, %xmm1
+; STRICT-NEXT: movaps %xmm1, %xmm0
 ; STRICT-NEXT: retq
 ;
 ; RELAX-LABEL: ugt_y:
@@ -902,11 +902,11 @@
 ; STRICT-LABEL: ult_y:
 ; STRICT: # BB#0:
 ; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; STRICT-NEXT: movapd %xmm1, %xmm2
+; STRICT-NEXT: movaps %xmm1, %xmm2
 ; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
-; STRICT-NEXT: andpd %xmm2, %xmm0
-; STRICT-NEXT: andnpd %xmm1, %xmm2
-; STRICT-NEXT: orpd %xmm2, %xmm0
+; STRICT-NEXT: andps %xmm2, %xmm0
+; STRICT-NEXT: andnps %xmm1, %xmm2
+; STRICT-NEXT: orps %xmm2, %xmm0
 ; STRICT-NEXT: retq
 ;
 ; RELAX-LABEL: ult_y:
@@ -923,12 +923,12 @@
 ; STRICT-LABEL: ugt_inverse_y:
 ; STRICT: # BB#0:
 ; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movapd %xmm0, %xmm1
+; STRICT-NEXT: movaps %xmm0, %xmm1
 ; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm2
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: orpd %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
+; STRICT-NEXT: andps %xmm1, %xmm2
+; STRICT-NEXT: andnps %xmm0, %xmm1
+; STRICT-NEXT: orps %xmm2, %xmm1
+; STRICT-NEXT: movaps %xmm1, %xmm0
 ; STRICT-NEXT: retq
 ;
 ; UNSAFE-LABEL: ugt_inverse_y:
@@ -952,12 +952,12 @@
 ; STRICT-LABEL: ult_inverse_y:
 ; STRICT: # BB#0:
 ; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movapd %xmm2, %xmm1
+; STRICT-NEXT: movaps %xmm2, %xmm1
 ; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm2
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: orpd %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
+; STRICT-NEXT: andps %xmm1, %xmm2
+; STRICT-NEXT: andnps %xmm0, %xmm1
+; STRICT-NEXT: orps %xmm2, %xmm1
+; STRICT-NEXT: movaps %xmm1, %xmm0
 ; STRICT-NEXT: retq
 ;
 ; UNSAFE-LABEL: ult_inverse_y:
Index: test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
===================================================================
--- test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
+++ test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
@@ -55,7 +55,7 @@
 define <2 x i64> @test_mm_loadu_si64(i64* %a0) nounwind {
 ; X64-LABEL: test_mm_loadu_si64:
 ; X64: # BB#0:
-; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT: retq
 %ld = load i64, i64* %a0, align 1
 %res0 = insertelement <2 x i64> undef, i64 %ld, i32 0
Index: test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -1275,7 +1275,7 @@
 define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
 ; X32-LABEL: test_mm_cvtsi32_si128:
 ; X32: # BB#0:
-; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_mm_cvtsi32_si128:
@@ -1523,12 +1523,12 @@
 ; X32-LABEL: test_mm_loadl_epi64:
 ; X32: # BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_mm_loadl_epi64:
 ; X64: # BB#0:
-; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT: retq
 %bc = bitcast <2 x i64>* %a1 to i64*
 %ld = load i64, i64* %bc, align 1
@@ -2326,7 +2326,7 @@
 define <2 x double> @test_mm_set_sd(double %a0) nounwind {
 ; X32-LABEL: test_mm_set_sd:
 ; X32: # BB#0:
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
 ; X32-NEXT: retl
 ;
Index: test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
===================================================================
--- test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
+++ test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | FileCheck %s
 
 define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
@@ -98,7 +98,7 @@
 ; CHECK-LABEL: test_x86_sse2_storeu_pd:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
 ; CHECK-NEXT: addpd %xmm0, %xmm1
 ; CHECK-NEXT: movupd %xmm1, (%eax)
Index: test/CodeGen/X86/sse2.ll
===================================================================
--- test/CodeGen/X86/sse2.ll
+++ test/CodeGen/X86/sse2.ll
@@ -76,7 +76,7 @@
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT: movl (%eax), %eax
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; CHECK-NEXT: pxor %xmm0, %xmm0
 ; CHECK-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
Index: test/CodeGen/X86/uint64-to-float.ll
===================================================================
--- test/CodeGen/X86/uint64-to-float.ll
+++ test/CodeGen/X86/uint64-to-float.ll
@@ -13,8 +13,8 @@
 ; X86-NEXT: movl %esp, %ebp
 ; X86-NEXT: andl $-8, %esp
 ; X86-NEXT: subl $16, %esp
-; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
 ; X86-NEXT: xorl %eax, %eax
 ; X86-NEXT: cmpl $0, 12(%ebp)
 ; X86-NEXT: setns %al
Index: test/CodeGen/X86/uint_to_fp-2.ll
===================================================================
--- test/CodeGen/X86/uint_to_fp-2.ll
+++ test/CodeGen/X86/uint_to_fp-2.ll
@@ -7,8 +7,8 @@
 ; CHECK: # BB#0: # %entry
 ; CHECK-NEXT: pushl %eax
 ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: por %xmm0, %xmm1
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: orpd %xmm0, %xmm1
 ; CHECK-NEXT: subsd %xmm0, %xmm1
 ; CHECK-NEXT: xorps %xmm0, %xmm0
 ; CHECK-NEXT: cvtsd2ss %xmm1, %xmm0
Index: test/CodeGen/X86/vec_extract-avx.ll
===================================================================
--- test/CodeGen/X86/vec_extract-avx.ll
+++ test/CodeGen/X86/vec_extract-avx.ll
@@ -126,7 +126,7 @@
 ;
 ; X64-LABEL: legal_vzmovl_2i32_8i32:
 ; X64: # BB#0:
-; X64-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
 ; X64-NEXT: vmovaps %ymm0, (%rsi)
@@ -178,7 +178,7 @@
 ;
 ; X64-LABEL: legal_vzmovl_2f32_8f32:
 ; X64: # BB#0:
-; X64-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
 ; X64-NEXT: vmovaps %ymm0, (%rsi)
Index: test/CodeGen/X86/vec_extract-mmx.ll
===================================================================
--- test/CodeGen/X86/vec_extract-mmx.ll
+++ test/CodeGen/X86/vec_extract-mmx.ll
@@ -16,7 +16,7 @@
 ; X32-NEXT: movl %ecx, (%esp)
 ; X32-NEXT: pshufw $238, (%esp), %mm0 # mm0 = mem[2,3,2,3]
 ; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
 ; X32-NEXT: movd %xmm0, %eax
 ; X32-NEXT: addl $32, %eax
@@ -55,7 +55,7 @@
 ; X32-NEXT: movd (%eax), %mm0
 ; X32-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
 ; X32-NEXT: movq %mm0, (%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
 ; X32-NEXT: movd %xmm0, %eax
 ; X32-NEXT: emms
@@ -98,7 +98,7 @@
 ; X32-NEXT: movl 8(%ebp), %eax
 ; X32-NEXT: pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
 ; X32-NEXT: movq %mm0, (%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
 ; X32-NEXT: movd %xmm0, %eax
 ; X32-NEXT: emms
@@ -149,7 +149,7 @@
 ; X32-NEXT: andl $-8, %esp
 ; X32-NEXT: subl $8, %esp
 ; X32-NEXT: movq %mm0, (%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,0,1]
 ; X32-NEXT: movd %xmm0, %eax
 ; X32-NEXT: movl %ebp, %esp
Index: test/CodeGen/X86/vec_i64.ll
===================================================================
--- test/CodeGen/X86/vec_i64.ll
+++ test/CodeGen/X86/vec_i64.ll
@@ -8,12 +8,12 @@
 ; X32-LABEL: foo1:
 ; X32: # BB#0: # %entry
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: foo1:
 ; X64: # BB#0: # %entry
-; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT: retq
 entry:
 %tmp1 = load i64, i64* %y, align 8
@@ -27,12 +27,12 @@
 ; X32-LABEL: foo2:
 ; X32: # BB#0: # %entry
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: foo2:
 ; X64: # BB#0: # %entry
-; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT: retq
 entry:
 %load = load i64, i64* %p
Index: test/CodeGen/X86/vec_insert-2.ll
===================================================================
--- test/CodeGen/X86/vec_insert-2.ll
+++ test/CodeGen/X86/vec_insert-2.ll
@@ -23,7 +23,7 @@
 define <4 x i32> @t2(i32 %s, <4 x i32> %tmp) nounwind {
 ; X32-LABEL: t2:
 ; X32: # BB#0:
-; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
 ; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
 ; X32-NEXT: retl
Index: test/CodeGen/X86/vec_insert-3.ll
===================================================================
--- test/CodeGen/X86/vec_insert-3.ll
+++ test/CodeGen/X86/vec_insert-3.ll
@@ -5,10 +5,10 @@
 define <2 x i64> @t1(i64 %s, <2 x i64> %tmp) nounwind {
 ; X32-LABEL: t1:
 ; X32: # BB#0:
-; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
-; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
 ; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
 ; X32-NEXT: retl
Index: test/CodeGen/X86/vec_insert-mmx.ll
===================================================================
--- test/CodeGen/X86/vec_insert-mmx.ll
+++ test/CodeGen/X86/vec_insert-mmx.ll
@@ -29,7 +29,7 @@
 define <8 x i8> @t1(i8 zeroext %x) nounwind {
 ; X32-LABEL: t1:
 ; X32: ## BB#0:
-; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: t1:
@@ -65,7 +65,7 @@
 ; X32: ## BB#0:
 ; X32-NEXT: movl L_g0$non_lazy_ptr, %eax
 ; X32-NEXT: movl L_g1$non_lazy_ptr, %ecx
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; X32-NEXT: movzwl (%eax), %eax
 ; X32-NEXT: movd %eax, %xmm1
Index: test/CodeGen/X86/vec_int_to_fp.ll
===================================================================
--- test/CodeGen/X86/vec_int_to_fp.ll
+++ test/CodeGen/X86/vec_int_to_fp.ll
@@ -2976,7 +2976,7 @@
 ;
 ; AVX512F-LABEL: uitofp_load_2i32_to_2f64:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
 ; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0
 ; AVX512F-NEXT: retq
@@ -2990,7 +2990,7 @@
 ;
 ; AVX512DQ-LABEL: uitofp_load_2i32_to_2f64:
 ; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
 ; AVX512DQ-NEXT: # kill: %XMM0 %XMM0 %ZMM0
 ; AVX512DQ-NEXT: retq
Index: test/CodeGen/X86/vec_set-2.ll
===================================================================
--- test/CodeGen/X86/vec_set-2.ll
+++ test/CodeGen/X86/vec_set-2.ll
@@ -16,7 +16,7 @@
 define <2 x i64> @test(i32 %a) nounwind {
 ; CHECK-LABEL: test:
 ; CHECK: # BB#0:
-; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: retl
 %tmp = insertelement <4 x i32> zeroinitializer, i32 %a, i32 0
 %tmp6 = insertelement <4 x i32> %tmp, i32 0, i32 1
Index: test/CodeGen/X86/vec_set-C.ll
===================================================================
--- test/CodeGen/X86/vec_set-C.ll
+++ test/CodeGen/X86/vec_set-C.ll
@@ -5,7 +5,7 @@
 define <2 x i64> @t1(i64 %x) nounwind {
 ; X32-LABEL: t1:
 ; X32: # BB#0:
-; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: t1:
Index: test/CodeGen/X86/vec_set-D.ll
===================================================================
--- test/CodeGen/X86/vec_set-D.ll
+++ test/CodeGen/X86/vec_set-D.ll
@@ -4,7 +4,7 @@
 define <4 x i32> @t(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: t:
 ; CHECK: # BB#0:
-; CHECK-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT: retl
 %tmp1 = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
 %tmp2 = insertelement <4 x i32> %tmp1, i32 %y, i32 1
Index: test/CodeGen/X86/vec_set-F.ll
===================================================================
--- test/CodeGen/X86/vec_set-F.ll
+++ test/CodeGen/X86/vec_set-F.ll
@@ -5,7 +5,7 @@
 ; CHECK-LABEL: t1:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT: retl
 %tmp45 = bitcast <2 x i64>* %ptr to <2 x i32>*
 %tmp615 = load <2 x i32>, <2 x i32>* %tmp45
Index: test/CodeGen/X86/vector-shuffle-128-v2.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -1001,12 +1001,12 @@
 define <2 x i64> @insert_mem_and_zero_v2i64(i64* %ptr) {
 ; SSE-LABEL: insert_mem_and_zero_v2i64:
 ; SSE: # BB#0:
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: insert_mem_and_zero_v2i64:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: retq
 %a = load i64, i64* %ptr
 %v = insertelement <2 x i64> undef, i64 %a, i32 0
Index: test/CodeGen/X86/vector-shuffle-128-v4.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -2055,12 +2055,12 @@
 define <4 x i32> @insert_mem_and_zero_v4i32(i32* %ptr) {
 ; SSE-LABEL: insert_mem_and_zero_v4i32:
 ; SSE: # BB#0:
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: insert_mem_and_zero_v4i32:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT: retq
 %a = load i32, i32* %ptr
 %v = insertelement <4 x i32> undef, i32 %a, i32 0
Index: test/CodeGen/X86/vector-shuffle-256-v4.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -1187,7 +1187,7 @@
 define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
 ; ALL-LABEL: insert_mem_and_zero_v4i64:
 ; ALL: # BB#0:
-; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT: retq
 %a = load i64, i64* %ptr
 %v = insertelement <4 x i64> undef, i64 %a, i64 0
Index: test/CodeGen/X86/vector-shuffle-256-v8.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -2434,7 +2434,7 @@
 define <8 x float> @concat_v2f32_1(<2 x float>* %tmp64, <2 x float>* %tmp65) {
 ; ALL-LABEL: concat_v2f32_1:
 ; ALL: # BB#0: # %entry
-; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; ALL-NEXT: retq
 entry:
@@ -2449,7 +2449,7 @@
 define <8 x float> @concat_v2f32_2(<2 x float>* %tmp64, <2 x float>* %tmp65) {
 ; ALL-LABEL: concat_v2f32_2:
 ; ALL: # BB#0: # %entry
-; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; ALL-NEXT: retq
 entry:
@@ -2462,7 +2462,7 @@
 define <8 x float> @concat_v2f32_3(<2 x float>* %tmp64, <2 x float>* %tmp65) {
 ; ALL-LABEL: concat_v2f32_3:
 ; ALL: # BB#0: # %entry
-; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; ALL-NEXT: retq
 entry:
@@ -2476,7 +2476,7 @@
 define <8 x i32> @insert_mem_and_zero_v8i32(i32* %ptr) {
 ; ALL-LABEL: insert_mem_and_zero_v8i32:
 ; ALL: # BB#0:
-; ALL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; ALL-NEXT: retq
 %a = load i32, i32* %ptr
 %v = insertelement <8 x i32> undef, i32 %a, i32 0
Index: test/CodeGen/X86/vector-shuffle-512-v16.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -301,7 +301,7 @@
 define <16 x i32> @insert_mem_and_zero_v16i32(i32* %ptr) {
 ; ALL-LABEL: insert_mem_and_zero_v16i32:
 ; ALL: # BB#0:
-; ALL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; ALL-NEXT: retq
 %a = load i32, i32* %ptr
 %v = insertelement <16 x i32> undef, i32 %a, i32 0
Index: test/CodeGen/X86/vector-shuffle-combining-xop.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-combining-xop.ll
+++ test/CodeGen/X86/vector-shuffle-combining-xop.ll
@@ -397,7 +397,7 @@
 ; X32-LABEL: PR31296:
 ; X32: # BB#0: # %entry
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT: vmovaps {{.*#+}} xmm1 = <0,1,u,u>
 ; X32-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0,0,1]
 ; X32-NEXT: retl
Index: test/CodeGen/X86/vector-shuffle-combining.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-combining.ll
+++ test/CodeGen/X86/vector-shuffle-combining.ll
@@ -1782,13 +1782,13 @@
 define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
 ; SSE-LABEL: combine_test22:
 ; SSE: # BB#0:
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_test22:
 ; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX-NEXT: retq
 ; Current AVX2 lowering of this is still awful, not adding a test case.
@@ -2818,13 +2818,13 @@
 ; SSE-LABEL: combine_scalar_load_with_blend_with_zero:
 ; SSE: # BB#0:
 ; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: movapd %xmm0, (%rsi)
+; SSE-NEXT: movaps %xmm0, (%rsi)
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_scalar_load_with_blend_with_zero:
 ; AVX: # BB#0:
 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovapd %xmm0, (%rsi)
+; AVX-NEXT: vmovaps %xmm0, (%rsi)
 ; AVX-NEXT: retq
 %1 = load double, double* %a0, align 8
 %2 = insertelement <2 x double> undef, double %1, i32 0
Index: test/CodeGen/X86/vector-shuffle-mmx.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-mmx.ll
+++ test/CodeGen/X86/vector-shuffle-mmx.ll
@@ -8,7 +8,7 @@
 ; X32-LABEL: test0:
 ; X32: ## BB#0: ## %entry
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X32-NEXT: movq %xmm0, (%eax)
 ; X32-NEXT: retl
Index: test/CodeGen/X86/vector-shuffle-variable-256.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -250,9 +250,9 @@
 ; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm5
 ; AVX2-NEXT: vmovd %r9d, %xmm6
 ; AVX2-NEXT: vpermps %ymm0, %ymm6, %ymm6
-; AVX2-NEXT: vmovd {{.*#+}} xmm7 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
 ; AVX2-NEXT: vpermps %ymm0, %ymm7, %ymm7
-; AVX2-NEXT: vmovd {{.*#+}} xmm8 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
 ; AVX2-NEXT: vpermps %ymm0, %ymm8, %ymm0
 ; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
 ; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
Index: test/CodeGen/X86/vector-zmov.ll
===================================================================
--- test/CodeGen/X86/vector-zmov.ll
+++ test/CodeGen/X86/vector-zmov.ll
@@ -8,12 +8,12 @@
 
 define <4 x i32> @load_zmov_4i32_to_0zzz(<4 x i32> *%ptr) {
 ; SSE-LABEL: load_zmov_4i32_to_0zzz:
 ; SSE: # BB#0: # %entry
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: load_zmov_4i32_to_0zzz:
 ; AVX: # BB#0: # %entry
-; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT: retq
 entry:
@@ -25,12 +25,12 @@
 
 define <2 x i64> @load_zmov_2i64_to_0z(<2 x i64> *%ptr) {
 ; SSE-LABEL: load_zmov_2i64_to_0z:
 ; SSE: # BB#0: # %entry
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: load_zmov_2i64_to_0z:
 ; AVX: # BB#0: # %entry
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: retq
 entry:
Index: test/CodeGen/X86/widen_load-2.ll
===================================================================
--- test/CodeGen/X86/widen_load-2.ll
+++ test/CodeGen/X86/widen_load-2.ll
@@ -195,8 +195,8 @@
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
 ; X86-NEXT: paddw %xmm0, %xmm1
 ; X86-NEXT: movq %xmm1, (%eax)
 ; X86-NEXT: retl $4
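A note on the tables being extended above, for readers who have not looked at the domain-fixing code recently: each row of the ReplaceableInstrs tables (and their AVX/AVX512 variants) lists the same operation as encoded in the single-FP, double-FP, and integer execution domains, and the execution-domain fix pass swaps an instruction for the column that matches its neighbours. The sketch below is only an illustration of that row/column lookup under simplified, assumed names; it is not LLVM's actual API, and the opcode strings stand in for the real X86:: opcode enums. MOVSDrm/MOVSSrm sit in both FP columns because the same scalar load serves the PS and PD domains.

#include <iostream>
#include <string>

// Simplified stand-in for LLVM's ReplaceableInstrs tables: one row per
// operation, one column per execution domain.  The real tables hold uint16_t
// opcode enums; strings are used here only to keep the example self-contained.
enum Domain { SingleDomain = 0, DoubleDomain = 1, IntegerDomain = 2 };

static const char *const ReplaceableLoads[][3] = {
    {"MOVUPSrm", "MOVUPDrm", "MOVDQUrm"},
    {"MOVSDrm", "MOVSDrm", "MOVQI2PQIrm"},   // row added by this patch
    {"MOVSSrm", "MOVSSrm", "MOVDI2PDIrm"},   // row added by this patch
};

// Find the row containing Opcode and return the entry for the requested
// domain; opcodes not in the table are left unchanged.
static std::string switchDomain(const std::string &Opcode, Domain To) {
  for (const auto &Row : ReplaceableLoads)
    for (const char *Entry : Row)
      if (Opcode == Entry)
        return Row[To];
  return Opcode;
}

int main() {
  std::cout << switchDomain("MOVSDrm", IntegerDomain) << "\n";    // MOVQI2PQIrm
  std::cout << switchDomain("MOVQI2PQIrm", DoubleDomain) << "\n"; // MOVSDrm
  return 0;
}

In this model, the new rows are what allow a scalar double load (movsd) whose users are all integer-domain instructions to be re-encoded as movq, and conversely an integer scalar load feeding FP users to become movsd/movss, which is the flavour of churn visible throughout the test updates above.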