Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -22927,6 +22927,27 @@
                                 MinAlign(St->getAlignment(), 4));
     return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
   }
+
+  // This is similar to the above case, but here we handle a scalar 64-bit
+  // integer store that is extracted from a vector on a 32-bit target.
+  // If we have SSE, then we can treat it like a floating-point double
+  // to get past legalization. The execution dependencies fixup pass will
+  // choose the optimal machine instruction for the store if this really is
+  // an integer or v2f32 rather than an f64.
+  if (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit() &&
+      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+    SDValue OldExtract = St->getOperand(1);
+    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
+                                     DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
+                                                 OldExtract.getOperand(0)),
+                                     OldExtract.getOperand(1));
+    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
+                        St->getPointerInfo(),
+                        St->isVolatile(),
+                        St->isNonTemporal(),
+                        St->getAlignment());
+  }
+
   return SDValue();
 }
 
Index: test/CodeGen/X86/i64-mem-copy.ll
===================================================================
--- test/CodeGen/X86/i64-mem-copy.ll
+++ test/CodeGen/X86/i64-mem-copy.ll
@@ -18,9 +18,31 @@
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movsd %xmm0, (%eax)
 ; X32-NEXT:    retl
-
   %tmp1 = load i64, i64* %y, align 8
   store i64 %tmp1, i64* %x, align 8
   ret void
 }
+
+; Verify that a 64-bit chunk extracted from a vector is stored with a movq
+; regardless of whether the system is 64-bit.
+
+define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, i64* %i) {
+; X64-LABEL: store_i64_from_vector:
+; X64:       # BB#0:
+; X64-NEXT:    paddw %xmm1, %xmm0
+; X64-NEXT:    movq %xmm0, (%rdi)
+; X64-NEXT:    retq
+;
+; X32-LABEL: store_i64_from_vector:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    paddw %xmm1, %xmm0
+; X32-NEXT:    movq %xmm0, (%eax)
+; X32-NEXT:    retl
+  %z = add <8 x i16> %x, %y ; force execution domain
+  %bc = bitcast <8 x i16> %z to <2 x i64>
+  %vecext = extractelement <2 x i64> %bc, i32 0
+  store i64 %vecext, i64* %i, align 8
+  ret void
+}
 