Index: lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -263,104 +263,6 @@
   return true;
 }
 
-static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
-                                           SmallVectorImpl<Value *> &Left,
-                                           SmallVectorImpl<Value *> &Right) {
-
-  SmallVector<Value *, 16> OrigLeft, OrigRight;
-
-  bool AllSameOpcodeLeft = true;
-  bool AllSameOpcodeRight = true;
-  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
-    Instruction *I = cast<Instruction>(VL[i]);
-    Value *V0 = I->getOperand(0);
-    Value *V1 = I->getOperand(1);
-
-    OrigLeft.push_back(V0);
-    OrigRight.push_back(V1);
-
-    Instruction *I0 = dyn_cast<Instruction>(V0);
-    Instruction *I1 = dyn_cast<Instruction>(V1);
-
-    // Check whether all operands on one side have the same opcode. In this case
-    // we want to preserve the original order and not make things worse by
-    // reordering.
-    AllSameOpcodeLeft = I0;
-    AllSameOpcodeRight = I1;
-
-    if (i && AllSameOpcodeLeft) {
-      if(Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i-1])) {
-        if(P0->getOpcode() != I0->getOpcode())
-          AllSameOpcodeLeft = false;
-      } else
-        AllSameOpcodeLeft = false;
-    }
-    if (i && AllSameOpcodeRight) {
-      if(Instruction *P1 = dyn_cast<Instruction>(OrigRight[i-1])) {
-        if(P1->getOpcode() != I1->getOpcode())
-          AllSameOpcodeRight = false;
-      } else
-        AllSameOpcodeRight = false;
-    }
-
-    // Sort two opcodes. In the code below we try to preserve the ability to use
-    // broadcast of values instead of individual inserts.
-    // vl1 = load
-    // vl2 = phi
-    // vr1 = load
-    // vr2 = vr2
-    //    = vl1 x vr1
-    //    = vl2 x vr2
-    // If we just sorted according to opcode we would leave the first line in
-    // tact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
-    //    = vl1 x vr1
-    //    = vr2 x vl2
-    // Because vr2 and vr1 are from the same load we loose the opportunity of a
-    // broadcast for the packed right side in the backend: we have [vr1, vl2]
-    // instead of [vr1, vr2=vr1].
-    if (I0 && I1) {
-       if(!i && I0->getOpcode() > I1->getOpcode()) {
-         Left.push_back(I1);
-         Right.push_back(I0);
-       } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i-1] != I1) {
-         // Try not to destroy a broad cast for no apparent benefit.
-         Left.push_back(I1);
-         Right.push_back(I0);
-       } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i-1] ==  I0) {
-         // Try preserve broadcasts.
-         Left.push_back(I1);
-         Right.push_back(I0);
-       } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i-1] == I1) {
-         // Try preserve broadcasts.
-         Left.push_back(I1);
-         Right.push_back(I0);
-       } else {
-         Left.push_back(I0);
-         Right.push_back(I1);
-       }
-       continue;
-    }
-    // One opcode, put the instruction on the right.
-    if (I0) {
-      Left.push_back(V1);
-      Right.push_back(I0);
-      continue;
-    }
-    Left.push_back(V0);
-    Right.push_back(V1);
-  }
-
-  bool LeftBroadcast = isSplat(Left);
-  bool RightBroadcast = isSplat(Right);
-
-  // Don't reorder if the operands where good to begin with.
-  if (!(LeftBroadcast || RightBroadcast) &&
-      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
-    Left = OrigLeft;
-    Right = OrigRight;
-  }
-}
-
 /// \returns True if in-tree use also needs extract. This refers to
 /// possible scalar operand in vectorized instruction.
 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
@@ -439,6 +341,12 @@
   /// \returns true if the memory operations A and B are consecutive.
   bool isConsecutiveAccess(Value *A, Value *B);
 
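+  /// \brief Reorder the operands of the (commutative) operations in an
+  /// alternate shuffle so that consecutive loads end up on the same side.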
+  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
+                                 SmallVectorImpl<Value *> &Left,
+                                 SmallVectorImpl<Value *> &Right);
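+  /// \brief Reorder the left and right operands of a bundle of commutative
+  /// operations, trying to preserve broadcasts and keep consecutive loads on
+  /// one side.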
+  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
+                                      SmallVectorImpl<Value *> &Left,
+                                      SmallVectorImpl<Value *> &Right);
   /// \brief Perform LICM and CSE on the newly generated gather sequences.
   void optimizeGatherSequence();
 
@@ -1381,6 +1289,16 @@
       }
       newTreeEntry(VL, true);
       DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
+
+      // Reorder operands if reordering would enable vectorization.
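+      // For binary operators, compute the reordered left and right operand
+      // bundles here and recurse on them directly; anything else still uses
+      // the generic per-operand loop below.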
+      if (isa<BinaryOperator>(VL0)) {
+        ValueList Left, Right;
+        reorderAltShuffleOperands(VL, Left, Right);
+        buildTree_rec(Left, Depth + 1);
+        buildTree_rec(Right, Depth + 1);
+        return;
+      }
+
       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
         ValueList Operands;
         // Prepare the operand vector.
@@ -1818,6 +1736,166 @@
   return X == PtrSCEVB;
 }
 
+// Reorder commutative operations in an alternate shuffle if the resulting
+// vectors are consecutive loads. This would allow us to vectorize the tree.
+// If we have something like:
+// load a[0] - load b[0]
+// load b[1] + load a[1]
+// load a[2] - load b[2]
+// load a[3] + load b[3]
+// Reordering the operands of the second operation (load b[1] + load a[1])
+// would allow us to vectorize this code.
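+// Note: this is called both when building the tree and when generating code
+// for the alternate shuffle, so both call sites see the same operand order.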
+void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
+                                        SmallVectorImpl<Value *> &Left,
+                                        SmallVectorImpl<Value *> &Right) {
+
+  // Push the left and right operands of the binary operations into Left and
+  // Right.
+  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+    Left.push_back(cast<Instruction>(VL[i])->getOperand(0));
+    Right.push_back(cast<Instruction>(VL[i])->getOperand(1));
+  }
+
+  // Reorder if we have a commutative operation and consecutive accesses
+  // end up on opposite sides of adjacent alternate instructions.
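+  // Concretely, if Left[j] and Right[j + 1] are consecutive loads, swapping
+  // the operands of whichever of the two (commutative) operations permits it
+  // puts both loads on the same side, so the load bundle can be vectorized.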
+  for (unsigned j = 0; j < VL.size() - 1; ++j) {
+    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
+      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
+        Instruction *VL1 = cast<Instruction>(VL[j]);
+        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
+        if (isConsecutiveAccess(L, L1) && VL1->isCommutative())
+          std::swap(Left[j], Right[j]);
+        else if (isConsecutiveAccess(L, L1) && VL2->isCommutative())
+          std::swap(Left[j + 1], Right[j + 1]);
+        // else unchanged
+      }
+    }
+  }
+}
+
+void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
+                                             SmallVectorImpl<Value *> &Left,
+                                             SmallVectorImpl<Value *> &Right) {
+
+  SmallVector<Value *, 16> OrigLeft, OrigRight;
+
+  bool AllSameOpcodeLeft = true;
+  bool AllSameOpcodeRight = true;
+  bool NeedsReordering = false;
+  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
+    Instruction *I = cast<Instruction>(VL[i]);
+    Value *V0 = I->getOperand(0);
+    Value *V1 = I->getOperand(1);
+
+    OrigLeft.push_back(V0);
+    OrigRight.push_back(V1);
+
+    Instruction *I0 = dyn_cast<Instruction>(V0);
+    Instruction *I1 = dyn_cast<Instruction>(V1);
+
+    // Check whether all operands on one side have the same opcode. In this case
+    // we want to preserve the original order and not make things worse by
+    // reordering.
+    AllSameOpcodeLeft = I0;
+    AllSameOpcodeRight = I1;
+
+    if (i && AllSameOpcodeLeft && I0) {
+      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i - 1])) {
+        if (P0->getOpcode() != I0->getOpcode())
+          AllSameOpcodeLeft = false;
+      } else
+        AllSameOpcodeLeft = false;
+    }
+    if (i && AllSameOpcodeRight && I1) {
+      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i - 1])) {
+        if (P1->getOpcode() != I1->getOpcode())
+          AllSameOpcodeRight = false;
+      } else
+        AllSameOpcodeRight = false;
+    }
+
+    // Sort two opcodes. In the code below we try to preserve the ability to use
+    // broadcast of values instead of individual inserts.
+    // vl1 = load
+    // vl2 = phi
+    // vr1 = load
+    // vr2 = vr1
+    //    = vl1 x vr1
+    //    = vl2 x vr2
+    // If we just sorted according to opcode we would leave the first line
+    // intact, but we would swap vl2 with vr2 because
+    // opcode(phi) > opcode(load).
+    //    = vl1 x vr1
+    //    = vr2 x vl2
+    // Because vr2 and vr1 are from the same load we lose the opportunity of a
+    // broadcast for the packed right side in the backend: we have [vr1, vl2]
+    // instead of [vr1, vr2=vr1].
+    if (I0 && I1) {
+      if (!i && I0->getOpcode() > I1->getOpcode()) {
+        Left.push_back(I1);
+        Right.push_back(I0);
+      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i - 1] != I1) {
+        // Try not to destroy a broadcast for no apparent benefit.
+        Left.push_back(I1);
+        Right.push_back(I0);
+      } else if (i && I0->getOpcode() == I1->getOpcode() &&
+                 Right[i - 1] == I0) {
+        // Try to preserve broadcasts.
+        Left.push_back(I1);
+        Right.push_back(I0);
+      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i - 1] == I1) {
+        // Try to preserve broadcasts.
+        Left.push_back(I1);
+        Right.push_back(I0);
+      } else {
+        Left.push_back(I0);
+        Right.push_back(I1);
+      }
+      continue;
+    }
+    // Only one operand is an instruction; put the instruction on the right.
+    if (I0) {
+      Left.push_back(V1);
+      Right.push_back(I0);
+      continue;
+    }
+    Left.push_back(V0);
+    Right.push_back(V1);
+  }
+
+  // Reorder operands of commutative operations if the resulting vectors are
+  // consecutive loads and the reordering does not break a broadcast that was
+  // preserved above. If we have something like:
+  // load a[0] + load b[0]
+  // load b[1] + load a[1]
+  // load a[2] + load b[2]
+  // load a[3] + load b[3]
+  // Reordering the operands of the second operation (load b[1] + load a[1])
+  // would allow us to vectorize this code.
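+  // The swap is skipped when an operand repeats on either side
+  // (Left[j] == Left[j + 1] or Right[j] == Right[j + 1]) so that an existing
+  // broadcast is kept intact.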
+  for (unsigned j = 0; j < VL.size() - 1; ++j) {
+    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
+      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
+        // Maintain order while reordering: always reorder the later operation
+        // in the tree; this prevents us from swapping already swapped
+        // elements.
+        if (isConsecutiveAccess(L, L1) &&
+            !(Left[j] == Left[j + 1] || Right[j] == Right[j + 1])) {
+          std::swap(Left[j + 1], Right[j + 1]);
+          NeedsReordering = true;
+        }
+        // else unchanged
+      }
+    }
+  }
+
+  bool LeftBroadcast = isSplat(Left);
+  bool RightBroadcast = isSplat(Right);
+  // Don't reorder if the operands were good to begin with.
+  if (!(LeftBroadcast || RightBroadcast) &&
+      (AllSameOpcodeRight || AllSameOpcodeLeft) && !NeedsReordering) {
+    Left = OrigLeft;
+    Right = OrigRight;
+  }
+}
+
 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
   Instruction *VL0 = cast<Instruction>(VL[0]);
   BasicBlock::iterator NextInst = VL0;
@@ -2214,9 +2292,13 @@
     }
     case Instruction::ShuffleVector: {
       ValueList LHSVL, RHSVL;
-      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
-        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
-        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
+      if (isa<BinaryOperator>(VL0))
+        reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
+      else {
+        for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
+          LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
+          RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
+        }
       }
       setInsertPointAfterBundle(E->Scalars);
 
Index: test/Transforms/SLPVectorizer/X86/addsub.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/addsub.ll
+++ test/Transforms/SLPVectorizer/X86/addsub.ll
@@ -10,6 +10,7 @@
 @fb = common global [4 x float] zeroinitializer, align 16
 @fc = common global [4 x float] zeroinitializer, align 16
 @fa = common global [4 x float] zeroinitializer, align 16
+@fd = common global [4 x float] zeroinitializer, align 16
 
 ; CHECK-LABEL: @addsub
 ; CHECK: %5 = add nsw <4 x i32> %3, %4
@@ -177,5 +178,90 @@
   ret void
 }
 
+
+; CHECK-LABEL: @reorder_alt
+; CHECK: %3 = fadd <4 x float> %1, %2
+; CHECK: %4 = fsub <4 x float> %1, %2
+; CHECK: %5 = shufflevector <4 x float> %3, <4 x float> %4, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+define void @reorder_alt() #0 {
+  %1 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+  %2 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+  %3 = fadd float %1, %2
+  store float %3, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+  %4 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+  %5 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+  %6 = fsub float %4, %5
+  store float %6, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+  %7 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+  %8 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+  %9 = fadd float %7, %8
+  store float %9, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+  %10 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+  %11 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+  %12 = fsub float %10, %11
+  store float %12, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+  ret void
+}
+
+; CHECK-LABEL: @reorder_alt_subTree
+; CHECK: %4 = fsub <4 x float> %3, %2
+; CHECK: %5 = fadd <4 x float> %3, %2
+; CHECK: %6 = shufflevector <4 x float> %4, <4 x float> %5, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK: %7 = fadd <4 x float> %1, %6
+; CHECK: %8 = fsub <4 x float> %1, %6
+; CHECK: %9 = shufflevector <4 x float> %7, <4 x float> %8, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+define void @reorder_alt_subTree() #0 {
+  %1 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+  %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+  %3 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 0), align 4
+  %4 = fsub float %2, %3
+  %5 = fadd float %1, %4
+  store float %5, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+  %6 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+  %7 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+  %8 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 1), align 4
+  %9 = fadd float %7, %8
+  %10 = fsub float %6, %9
+  store float %10, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+  %11 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+  %12 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+  %13 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 2), align 4
+  %14 = fsub float %12, %13
+  %15 = fadd float %11, %14
+  store float %15, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+  %16 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+  %17 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 3), align 4
+  %18 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+  %19 = fadd float %17, %18
+  %20 = fsub float %16, %19
+  store float %20, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+  ret void
+}
+
+; CHECK-LABEL: @no_vec_shuff_reorder
+; CHECK-NOT: fadd <4 x float>
+; CHECK-NOT: fsub <4 x float>
+; CHECK-NOT: shufflevector
+define void @no_vec_shuff_reorder() #0 {
+  %1 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+  %2 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+  %3 = fadd float %1, %2
+  store float %3, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+  %4 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+  %5 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+  %6 = fsub float %4, %5
+  store float %6, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+  %7 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+  %8 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+  %9 = fadd float %7, %8
+  store float %9, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+  %10 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+  %11 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+  %12 = fsub float %10, %11
+  store float %12, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+  ret void
+}
+
+
 attributes #0 = { nounwind }
 
Index: test/Transforms/SLPVectorizer/X86/operandorder.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/operandorder.ll
+++ test/Transforms/SLPVectorizer/X86/operandorder.ll
@@ -232,3 +232,53 @@
 for.end:
   ret void
 }
+
+; CHECK-LABEL: load_reorder_double
+; CHECK: load <2 x double>*
+; CHECK: fadd <2 x double>
+define void @load_reorder_double(double* nocapture %c, double* noalias nocapture readonly %a, double* noalias nocapture readonly %b){
+  %1 = load double* %a
+  %2 = load double* %b
+  %3 = fadd double %1, %2
+  store double %3, double* %c
+  %4 = getelementptr inbounds double* %b, i64 1
+  %5 = load double* %4
+  %6 = getelementptr inbounds double* %a, i64 1
+  %7 = load double* %6
+  %8 = fadd double %5, %7
+  %9 = getelementptr inbounds double* %c, i64 1
+  store double %8, double* %9
+  ret void
+}
+
+; CHECK-LABEL: load_reorder_float
+; CHECK: load <4 x float>*
+; CHECK: fadd <4 x float>
+define void @load_reorder_float(float* nocapture %c, float* noalias nocapture readonly %a, float* noalias nocapture readonly %b){
+  %1 = load float* %a
+  %2 = load float* %b
+  %3 = fadd float %1, %2
+  store float %3, float* %c
+  %4 = getelementptr inbounds float* %b, i64 1
+  %5 = load float* %4
+  %6 = getelementptr inbounds float* %a, i64 1
+  %7 = load float* %6
+  %8 = fadd float %5, %7
+  %9 = getelementptr inbounds float* %c, i64 1
+  store float %8, float* %9
+  %10 = getelementptr inbounds float* %a, i64 2
+  %11 = load float* %10
+  %12 = getelementptr inbounds float* %b, i64 2
+  %13 = load float* %12
+  %14 = fadd float %11, %13
+  %15 = getelementptr inbounds float* %c, i64 2
+  store float %14, float* %15
+  %16 = getelementptr inbounds float* %a, i64 3
+  %17 = load float* %16
+  %18 = getelementptr inbounds float* %b, i64 3
+  %19 = load float* %18
+  %20 = fadd float %17, %19
+  %21 = getelementptr inbounds float* %c, i64 3
+  store float %20, float* %21
+  ret void
+}