Index: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h
@@ -3263,6 +3263,10 @@
     SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
     SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
 
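+    /// If \p N has no uses, delete it and recursively delete any operand
+    /// nodes that become dead as a result. Returns true if \p N was deleted.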
+    bool recursivelyDeleteUnusedNodes(SDNode *N);
+
     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
   };
 
Index: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -761,6 +761,13 @@
   return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
 }
 
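+// Make DAGCombiner::recursivelyDeleteUnusedNodes available to targets through
+// DAGCombinerInfo, in the same way as the CombineTo wrappers above.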
+bool TargetLowering::DAGCombinerInfo::
+recursivelyDeleteUnusedNodes(SDNode *N) {
+  return ((DAGCombiner*)DC)->recursivelyDeleteUnusedNodes(N);
+}
+
 void TargetLowering::DAGCombinerInfo::
 CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
   return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -33429,8 +33429,21 @@
     if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
 
+    // Share broadcast with the widest vector and extract low subvector (free).
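+    // e.g. if this is a 128-bit broadcast and a 256-bit broadcast of the same
+    // scalar already exists, reuse it and extract its low 128 bits for free.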
+    for (SDNode *User : Src->uses())
+      if (User != N.getNode() &&
+          (User->getOpcode() == X86ISD::VBROADCAST ||
+           User->getOpcode() == X86ISD::VBROADCAST_LOAD) &&
+          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
+        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
+                                VT.getSizeInBits());
+      }
+
     // vbroadcast(scalarload X) -> vbroadcast_load X
-    if (!SrcVT.isVector() && Src.hasOneUse() &&
+    // For float loads, extract other uses of the scalar from the broadcast.
+    if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
         ISD::isNormalLoad(Src.getNode())) {
       LoadSDNode *LN = cast<LoadSDNode>(Src);
       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
@@ -33438,17 +33451,20 @@
       SDValue BcastLd =
           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                   LN->getMemoryVT(), LN->getMemOperand());
-      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
-      return BcastLd;
-    }
-
-    // Share broadcast with the longest vector and extract low subvector (free).
-    for (SDNode *User : Src->uses())
-      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
-          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
-        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
-                                VT.getSizeInBits());
+      // If only N uses the loaded value, replace N and delete the dead load.
+      bool NoReplaceExtract = Src.hasOneUse();
+      DCI.CombineTo(N.getNode(), BcastLd);
+      if (NoReplaceExtract) {
+        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
+        DCI.recursivelyDeleteUnusedNodes(LN);
+      } else {
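+        // Other users of the loaded scalar get element 0 of the broadcast.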
+        SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
+                                  DAG.getIntPtrConstant(0, DL));
+        DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
       }
+      return N; // Return N so it doesn't get rechecked!
+    }
 
     return SDValue();
   }
Index: llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll
+++ llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll
@@ -159,18 +159,14 @@
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    vmovsd %xmm0, (%eax)
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X32-NEXT:    vmovlps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: C2:
 ; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    vmovsd %xmm0, (%rsi)
-; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
+; X64-NEXT:    vmovlps %xmm0, (%rsi)
 ; X64-NEXT:    retq
 entry:
   %q = load double, double* %ptr, align 8
@@ -231,18 +227,14 @@
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT:    vbroadcastss (%ecx), %ymm0
 ; X32-NEXT:    vmovss %xmm0, (%eax)
-; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: D3:
 ; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    vmovss %xmm0, (%rsi)
-; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT:    retq
 entry:
   %q = load float, float* %ptr, align 4
@@ -285,16 +277,14 @@
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT:    vbroadcastss (%ecx), %xmm0
 ; X32-NEXT:    vmovss %xmm0, (%eax)
-; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: e2:
 ; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    vbroadcastss (%rdi), %xmm0
 ; X64-NEXT:    vmovss %xmm0, (%rsi)
-; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    retq
 entry:
   %q = load float, float* %ptr, align 4
@@ -669,16 +659,14 @@
 ; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    vmovsd %xmm0, (%eax)
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X32-NEXT:    vmovlps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: I2:
 ; X64:       ## %bb.0: ## %entry
-; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    vmovsd %xmm0, (%rsi)
-; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT:    vmovlps %xmm0, (%rsi)
 ; X64-NEXT:    retq
 entry:
   %q = load double, double* %ptr, align 4
@@ -884,7 +872,6 @@
 
 ;
 ; Broadcast scale factor for xyz vector - slp will have vectorized xy.
-; FIXME: Load as a broadcast and then use the scalar 0'th element.
 ;
 define double @broadcast_scale_xyz(double* nocapture readonly, double* nocapture readonly) nounwind {
 ; X32-LABEL: broadcast_scale_xyz:
@@ -892,9 +879,8 @@
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; X32-NEXT:    vmulpd (%eax), %xmm1, %xmm1
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X32-NEXT:    vmulpd (%eax), %xmm0, %xmm1
 ; X32-NEXT:    vmulsd 16(%eax), %xmm0, %xmm0
 ; X32-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
 ; X32-NEXT:    vaddsd %xmm2, %xmm1, %xmm1
@@ -906,9 +892,8 @@
 ;
 ; X64-LABEL: broadcast_scale_xyz:
 ; X64:       ## %bb.0:
-; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; X64-NEXT:    vmulpd (%rsi), %xmm1, %xmm1
+; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT:    vmulpd (%rsi), %xmm0, %xmm1
 ; X64-NEXT:    vmulsd 16(%rsi), %xmm0, %xmm0
 ; X64-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
 ; X64-NEXT:    vaddsd %xmm2, %xmm1, %xmm1