Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -22927,6 +22927,29 @@
                                 MinAlign(St->getAlignment(), 4));
     return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
   }
+
+  // This is similar to the above case, but here we handle a scalar 64-bit
+  // integer store that is extracted from a vector on a 32-bit target.
+  // If we have SSE2, then we can treat it like a floating-point double
+  // to get past legalization. The execution dependencies fixup pass will
+  // choose the optimal machine instruction for the store if this really is
+  // an integer or v2f32 rather than an f64.
+  if (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit() &&
+      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+    SDValue OldExtract = St->getOperand(1);
+    SDValue ExtOp0 = OldExtract.getOperand(0);
+    unsigned VecSize = ExtOp0.getValueSizeInBits();
+    MVT VecVT = MVT::getVectorVT(MVT::f64, VecSize / 64);
+    SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtOp0);
+    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
+                                     BitCast, OldExtract.getOperand(1));
+    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
+                        St->getPointerInfo(), St->isVolatile(),
+                        St->isNonTemporal(), St->getAlignment());
+  }
+
   return SDValue();
 }
Index: test/CodeGen/X86/i64-mem-copy.ll
===================================================================
--- test/CodeGen/X86/i64-mem-copy.ll
+++ test/CodeGen/X86/i64-mem-copy.ll
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X64
 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X32AVX
 
 ; Use movq or movsd to load / store i64 values if sse2 is available.
 ; rdar://6659858
@@ -18,9 +19,46 @@
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movsd %xmm0, (%eax)
 ; X32-NEXT:    retl
-
   %tmp1 = load i64, i64* %y, align 8
   store i64 %tmp1, i64* %x, align 8
   ret void
 }
+
+; Verify that a 64-bit chunk extracted from a vector is stored with a movq
+; regardless of whether the system is 64-bit.
+
+define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, i64* %i) {
+; X64-LABEL: store_i64_from_vector:
+; X64:       # BB#0:
+; X64-NEXT:    paddw %xmm1, %xmm0
+; X64-NEXT:    movq %xmm0, (%rdi)
+; X64-NEXT:    retq
+;
+; X32-LABEL: store_i64_from_vector:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    paddw %xmm1, %xmm0
+; X32-NEXT:    movq %xmm0, (%eax)
+; X32-NEXT:    retl
+  %z = add <8 x i16> %x, %y ; force execution domain
+  %bc = bitcast <8 x i16> %z to <2 x i64>
+  %vecext = extractelement <2 x i64> %bc, i32 0
+  store i64 %vecext, i64* %i, align 8
+  ret void
+}
+
+define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, i64* %i) {
+; X32AVX-LABEL: store_i64_from_vector256:
+; X32AVX:       # BB#0:
+; X32AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32AVX-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; X32AVX-NEXT:    vmovq %xmm0, (%eax)
+; X32AVX-NEXT:    vzeroupper
+; X32AVX-NEXT:    retl
+  %z = add <16 x i16> %x, %y ; force execution domain
+  %bc = bitcast <16 x i16> %z to <4 x i64>
+  %vecext = extractelement <4 x i64> %bc, i32 0
+  store i64 %vecext, i64* %i, align 8
+  ret void
+}
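
Note for reviewers: the transform is easiest to see at the IR level. The combine itself runs on SelectionDAG nodes, so the following is only a conceptual sketch of the equivalent rewrite (the function names @before/@after are illustrative, not part of the patch):

; Before: on i386 with SSE2, i64 is not a legal type, so the i64 store of
; the extracted element is split by type legalization into two 32-bit
; integer stores.
define void @before(<2 x i64> %v, i64* %p) {
  %e = extractelement <2 x i64> %v, i32 0
  store i64 %e, i64* %p, align 8
  ret void
}

; After: reinterpreting the source vector as <2 x double> yields an f64
; store, which is legal with SSE2 and lowers to a single movq/movsd. The
; execution dependency fixup pass later chooses the best domain for the
; store, as the new comment in PerformSTORECombine explains.
define void @after(<2 x i64> %v, i64* %p) {
  %vd = bitcast <2 x i64> %v to <2 x double>
  %e = extractelement <2 x double> %vd, i32 0
  %pd = bitcast i64* %p to double*
  store double %e, double* %pd, align 8
  ret void
}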