Index: lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -439,6 +439,13 @@
   /// \returns true if the memory operations A and B are consecutive.
   bool isConsecutiveAccess(Value *A, Value *B);
 
+  /// For consecutive loads (+(+ v0, v1)(+ v2, v3)), Left had v0 and v2
+  /// while Right had v1 and v3, which prevented bundling them into
+  /// a vector of loads. Reorder them so that Left now has v0 and v1
+  /// while Right has v2 and v3 enabling their bundling into a vector.
+  void reorderIfConsecutiveLoads(SmallVectorImpl<Value *> &Left,
+                                 SmallVectorImpl<Value *> &Right);
+
   /// \brief Perform LICM and CSE on the newly generated gather sequences.
   void optimizeGatherSequence();
 
@@ -1234,6 +1241,7 @@
       if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
         ValueList Left, Right;
         reorderInputsAccordingToOpcode(VL, Left, Right);
+        reorderIfConsecutiveLoads(Left, Right);
         buildTree_rec(Left, Depth + 1);
         buildTree_rec(Right, Depth + 1);
         return;
@@ -1818,6 +1826,22 @@
   return X == PtrSCEVB;
 }
 
+void BoUpSLP::reorderIfConsecutiveLoads(SmallVectorImpl<Value *> &Left,
+                                        SmallVectorImpl<Value *> &Right) {
+  // Use i + 1 < e (not i < e - 1): e is unsigned, so an empty Left would
+  // underflow the bound and walk off the end of both vectors.
+  for (unsigned i = 0, e = Left.size(); i + 1 < e; ++i) {
+    if (!isa<LoadInst>(Left[i]) || !isa<LoadInst>(Right[i]))
+      return;
+    // The isa<> check above guarantees this cast; cast<> asserts instead of
+    // the redundant dyn_cast<>. Only integer loads are reordered for now.
+    LoadInst *L = cast<LoadInst>(Left[i]);
+    if (!L->getType()->isIntegerTy())
+      return;
+    if (isConsecutiveAccess(Left[i], Right[i]))
+      std::swap(Left[i + 1], Right[i]);
+  }
+}
+
 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
   Instruction *VL0 = cast<Instruction>(VL[0]);
   BasicBlock::iterator NextInst = VL0;
@@ -2048,8 +2072,10 @@
     case Instruction::Or:
     case Instruction::Xor: {
       ValueList LHSVL, RHSVL;
-      if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
+      if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
         reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
+        reorderIfConsecutiveLoads(LHSVL, RHSVL);
+      }
       else
         for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
           LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
Index: test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll
===================================================================
--- test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll
+++ test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu -mcpu=cortex-a57 | FileCheck %s
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+; float hadd (float *a) {
+;   return (a[0] + a[1]) + (a[2] + a[3]);
+; }
+
+; CHECK-LABEL: @hadd
+; CHECK-NOT: load <2 x float>*
+; CHECK-NOT: fadd <2 x float>
+; CHECK-NOT: extractelement <2 x float>
+
+define float @hadd(float* nocapture readonly %a) {
+entry:
+  %0 = load float* %a, align 4                    ; a[0]
+  %arrayidx1 = getelementptr inbounds float* %a, i64 1
+  %1 = load float* %arrayidx1, align 4            ; a[1]
+  %add = fadd float %0, %1                        ; a[0] + a[1]
+  %arrayidx2 = getelementptr inbounds float* %a, i64 2
+  %2 = load float* %arrayidx2, align 4            ; a[2]
+  %arrayidx3 = getelementptr inbounds float* %a, i64 3
+  %3 = load float* %arrayidx3, align 4            ; a[3]
+  %add4 = fadd float %2, %3                       ; a[2] + a[3]
+  %add5 = fadd float %add, %add4                  ; final horizontal sum
+  ret float %add5
+}
+
+; CHECK-LABEL: @hadd_int
+; CHECK: load <2 x i32>*
+; CHECK: add <2 x i32>
+; CHECK: extractelement <2 x i32>
+define i32 @hadd_int(i32* nocapture readonly %a) {
+entry:
+  %0 = load i32* %a, align 4                      ; a[0]
+  %arrayidx1 = getelementptr inbounds i32* %a, i64 1
+  %1 = load i32* %arrayidx1, align 4              ; a[1]
+  %arrayidx2 = getelementptr inbounds i32* %a, i64 2
+  %2 = load i32* %arrayidx2, align 4              ; a[2]
+  %arrayidx3 = getelementptr inbounds i32* %a, i64 3
+  %3 = load i32* %arrayidx3, align 4              ; a[3]
+  %arrayidx6 = getelementptr inbounds i32* %a, i64 4
+  %4 = load i32* %arrayidx6, align 4              ; a[4]
+  %arrayidx7 = getelementptr inbounds i32* %a, i64 5
+  %5 = load i32* %arrayidx7, align 4              ; a[5]
+  %arrayidx10 = getelementptr inbounds i32* %a, i64 6
+  %6 = load i32* %arrayidx10, align 4             ; a[6]
+  %arrayidx11 = getelementptr inbounds i32* %a, i64 7
+  %7 = load i32* %arrayidx11, align 4             ; a[7]
+  %add1 = add i32 %0, %1                          ; pairwise sums of
+  %add2 = add i32 %2, %3                          ; consecutive loads --
+  %add3 = add i32 %4, %5                          ; the shape the reorder
+  %add4 = add i32 %6, %7                          ; makes vectorizable
+  %add5 = add i32 %add1, %add2
+  %add6 = add i32 %add3, %add4
+  %add7 = add i32 %add5, %add6                    ; final horizontal sum
+  ret i32 %add7
+}