Index: lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -115,8 +115,17 @@
                               "number "));
 
 static cl::opt<bool>
-ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
-                   cl::desc("Attempt to vectorize horizontal reductions"));
+    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
+                       cl::desc("Attempt to vectorize horizontal reductions"));
+
+static cl::opt<bool>
+    SLPThrottling("slp-throttling", cl::init(true), cl::Hidden,
+                  cl::desc("Enable tree partial vectorize with throttling"));
+
+static cl::opt<int>
+    MaxCostsRecalculations("slp-throttling-budget", cl::init(64), cl::Hidden,
+                           cl::desc("Limit the total number of nodes for cost "
+                                    "recalculations during throttling"));
 
 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
     "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
@@ -532,9 +541,27 @@
   /// holding live values over call sites.
   int getSpillCost() const;
 
+  /// \returns the cost of extracting the vectorized elements.
+  int getExtractCost();
+
+  /// \returns the cost of gathering canceled elements to be used
+  /// by vectorized operations during throttling.
+  int getInsertCost();
+
+  /// Traverses the non-gathering nodes of the SLP graph.
+  void treeTraversal(SmallVectorImpl<unsigned> &Leaves,
+                     SmallVectorImpl<unsigned> &Branches);
+
+  /// Cut the given path until what remains might be profitable to vectorize.
+  int cutPath(int &Cost, SmallVectorImpl<unsigned> &Path,
+              SmallVectorImpl<unsigned> &VecNodes);
+
+  /// Find a subtree of the whole tree suitable for vectorization.
+  int findSubTree(int Cost);
+
   /// \returns the vectorization cost of the subtree that starts at \p VL.
   /// A negative number means that this is profitable.
-  int getTreeCost();
+  int getTreeCost(bool CutTree = false);
 
   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
   /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
@@ -555,6 +582,8 @@
     ScalarToTreeEntry.clear();
     MustGather.clear();
     ExternalUses.clear();
+    InternalTreeUses.clear();
+    RemovedOperations.clear();
     NumOpsWantToKeepOrder.clear();
     NumOpsWantToKeepOriginalOrder = 0;
     for (auto &Iter : BlocksSchedules) {
@@ -562,6 +591,10 @@
       BS->clear();
     }
     MinBWs.clear();
+    UseTreeIndices.clear();
+    ScalarsToVec.clear();
+    VecToScalars.clear();
+    CostsRecalculations = 0;
   }
 
   unsigned getTreeSize() const { return VectorizableTree.size(); }
@@ -605,6 +638,9 @@
     return MinVecRegSize;
   }
 
+  /// Save seed instructions so we can try to vectorize them partially later.
+  void recordSeeds(ArrayRef<Value *> Ops);
+
   /// Check if ArrayType or StructType is isomorphic to some VectorType.
   ///
   /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
@@ -614,6 +650,16 @@
   /// vectorizable. We do not vectorize such trees.
   bool isTreeTinyAndNotFullyVectorizable() const;
 
+  /// Estimate whether the subtree cut of the whole tree is worth partially
+  /// vectorizing, not just from a cost perspective.
+  bool isGoodTreeCut(ArrayRef<unsigned> VecNodes);
+
+  /// Cut the tree to make it partially vectorizable.
+  bool cutTree(ArrayRef<unsigned> VecNodes);
+
+  /// Try to vectorize the tree partially via throttling.
+  bool tryPartialVectorization();
+
   OptimizationRemarkEmitter *getORE() { return ORE; }
 
   /// This structure holds any data we need about the edges being traversed
@@ -1169,6 +1215,13 @@
     /// Does this entry require reordering?
     ArrayRef<unsigned> ReorderIndices;
 
+    /// The predecessor node on the way to this node; tracked for
+    /// non-gathering tree members only.
+    unsigned NonGatherPred = 0;
+
+    /// Cost of this tree entry.
+    int Cost = 0;
+
     /// Points back to the VectorizableTree.
     ///
     /// Only used for Graphviz right now.  Unfortunately GraphTrait::NodeRef has
@@ -1324,6 +1377,16 @@
   /// Maps a specific scalar to its tree entry.
   SmallDenseMap<Value*, int> ScalarToTreeEntry;
 
+  /// Tree entries that should not be vectorized due to throttling.
+  SmallVector<int, 2> RemovedOperations;
+
+  /// Tree values proposed to be vectorized.
+  ValueSet ScalarsToVec;
+
+  /// Tree values once proposed for vectorization, but later decided by
+  /// throttling to stay in scalar form.
+  ValueSet VecToScalars;
+
   /// A list of scalars that we found that we need to keep as scalars.
   ValueSet MustGather;
 
@@ -1343,6 +1406,9 @@
   };
   using UserList = SmallVector<ExternalUser, 16>;
 
+  /// \returns the cost of extracting the vectorized elements.
+  int getExtractOperationCost(ExternalUser &EU);
+
   /// Checks if two instructions may access the same memory.
   ///
   /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
@@ -1394,6 +1460,17 @@
   /// after vectorization.
   UserList ExternalUses;
 
+  /// List of all seed instructions; we could try to vectorize these seeds
+  /// later with partial vectorization.
+  SmallVector<std::unique_ptr<SmallVector<Value *, 8>>, 2> Seeds;
+
+  /// Number of nodes for which we have already recalculated the cost of
+  /// the subtree during throttling.
+  int CostsRecalculations = 0;
+
+  /// Internal uses of proposed-to-vectorize tree values by other values in
+  /// that tree.
+  SmallDenseMap<Value *, UserList> InternalTreeUses;
+
   /// Values used only by @llvm.assume calls.
   SmallPtrSet<const Value *, 32> EphValues;
 
@@ -1403,6 +1480,9 @@
   /// A list of blocks that we are going to CSE.
   SetVector<BasicBlock *> CSEBlocks;
 
+  /// Maps the index of a tree entry to the indices of the entries it uses.
+  SmallDenseMap<unsigned, SmallVector<unsigned, 2>> UseTreeIndices;
+
   /// Contains all scheduling relevant data for an instruction.
   /// A ScheduleData either represents a single instruction or a member of an
   /// instruction bundle (= a group of instructions which is combined into a
@@ -1752,6 +1832,9 @@
   /// Attaches the BlockScheduling structures to basic blocks.
   MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
 
+  /// Remove operations from the list of those proposed to be scheduled.
+  void removeFromScheduling(BlockScheduling *BS);
+
   /// Performs the "real" scheduling. Done before vectorization is actually
   /// performed in a basic block.
   void scheduleBlock(BlockScheduling *BS);
@@ -1973,6 +2056,7 @@
             LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                               << ".\n");
             assert(!UseEntry->NeedToGather && "Bad state");
+            InternalTreeUses[U].push_back(ExternalUser(Scalar, U, FoundLane));
             continue;
           }
         }
@@ -2578,6 +2662,108 @@
   }
 }
 
+bool BoUpSLP::isGoodTreeCut(ArrayRef<unsigned> VecNodes) {
+  // Look for the first entry with operations worth vectorizing.
+  if (VecNodes.size() <= 2)
+    return false;
+  for (unsigned N : VecNodes) {
+    InstructionsState S = getSameOpcode(VectorizableTree[N]->Scalars);
+    auto *Inst = S.MainOp;
+    if (Inst && (isa<BinaryOperator>(Inst) || isa<FPMathOperator>(Inst) ||
+                 isa<CmpInst>(Inst)))
+      return true;
+  }
+  return false;
+}
+
+bool BoUpSLP::cutTree(ArrayRef<unsigned> VecNodes) {
+  SmallPtrSet<Value *, 4> Removed;
+  if (!isGoodTreeCut(VecNodes))
+    return false;
+  // Cancel unprofitable elements.
+  for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
+    TreeEntry *Entry = VectorizableTree[I].get();
+    if (Entry->NeedToGather)
+      continue;
+    if (!is_contained(VecNodes, I)) {
+      Entry->NeedToGather = true;
+      for (Value *V : Entry->Scalars) {
+        LLVM_DEBUG(dbgs() << "SLP: Remove scalar " << *V
+                          << " out of proposed to vectorize.\n");
+        ScalarToTreeEntry.erase(V);
+        Removed.insert(V);
+        RemovedOperations.push_back(I);
+        MustGather.insert(V);
+        ExternalUses.erase(
+            std::remove_if(ExternalUses.begin(), ExternalUses.end(),
+                           [&](ExternalUser &EU) { return EU.Scalar == V; }),
+            ExternalUses.end());
+      }
+    }
+  }
+  // For all canceled operations we should consider the possibility of
+  // their use by non-canceled operations; for that we have to populate
+  // the ExternalUses list with the canceled elements.
+  for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
+    TreeEntry *Entry = VectorizableTree[I].get();
+    if (Entry->NeedToGather)
+      continue;
+    if (is_contained(VecNodes, I)) {
+      for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
+        Value *Scalar = Entry->Scalars[Lane];
+        for (User *U : Scalar->users()) {
+          LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
+          Instruction *UserInst = dyn_cast<Instruction>(U);
+          if (!UserInst)
+            continue;
+          if (!Removed.count(U))
+            continue;
+          // Ignore users in the user ignore list.
+          if (is_contained(UserIgnoreList, UserInst))
+            continue;
+          LLVM_DEBUG(dbgs()
+                     << "SLP: Need to extract canceled operation :" << *U
+                     << " from lane " << Lane << " from " << *Scalar << ".\n");
+          ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
+        }
+      }
+    }
+  }
+  return true;
+}
+
+bool BoUpSLP::tryPartialVectorization() {
+  bool Changed = false;
+  for (unsigned I = 0; I < Seeds.size(); ++I) {
+    auto &S = *Seeds[I].get();
+    // Check that those seed instructions are still alive.
+    if (std::any_of(S.begin(), S.end(), [](Value *V) {
+          return (!(cast<Instruction>(V))->getOperand(0));
+        }))
+      continue;
+
+    buildTree(S);
+
+    // If other parts of the BB were already vectorized, the tree might
+    // not be interesting enough to look at.
+    if (isTreeTinyAndNotFullyVectorizable())
+      continue;
+
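+    // Ask getTreeCost to attempt a tree cut if the whole tree turns out
+    // not to be profitable on its own.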
+    int Cost = getTreeCost(true);
+    if (Cost < -SLPCostThreshold) {
+      vectorizeTree();
+      Changed = true;
+    }
+  }
+  Seeds.clear();
+  return Changed;
+}
+
+void BoUpSLP::recordSeeds(ArrayRef<Value *> Ops) {
+  Seeds.push_back(
+      llvm::make_unique<SmallVector<Value *, 8>>(Ops.begin(), Ops.end()));
+}
+
 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
   unsigned N;
   Type *EltTy;
@@ -2592,7 +2778,8 @@
   if (!isValidElementType(EltTy))
     return 0;
   uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
-  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
+  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
+      VTSize != DL.getTypeStoreSizeInBits(T))
     return 0;
   if (ST) {
     // Check that struct is homogeneous.
@@ -2615,7 +2802,8 @@
 
   CurrentOrder.clear();
 
-  // We have to extract from a vector/aggregate with the same number of elements.
+  // We have to extract from a vector/aggregate with the same number of
+  // elements.
   unsigned NElts;
   if (E0->getOpcode() == Instruction::ExtractValue) {
     const DataLayout &DL = E0->getModule()->getDataLayout();
@@ -3093,7 +3281,7 @@
   unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
   int Cost = 0;
 
-  SmallPtrSet<Instruction*, 4> LiveValues;
+  SmallPtrSet<Instruction *, 4> LiveValues;
   Instruction *PrevInst = nullptr;
 
   for (const auto &TEPtr : VectorizableTree) {
@@ -3109,7 +3297,7 @@
     // Update LiveValues.
     LiveValues.erase(PrevInst);
     for (auto &J : PrevInst->operands()) {
-      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
+      if (isa<Instruction>(&*J) && ScalarsToVec.count(&*J))
         LiveValues.insert(cast<Instruction>(&*J));
     }
 
@@ -3135,7 +3323,7 @@
       if ((isa<CallInst>(&*PrevInstIt) &&
            !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
           &*PrevInstIt != PrevInst) {
-        SmallVector<Type*, 4> V;
+        SmallVector<Type *, 4> V;
         for (auto *II : LiveValues)
           V.push_back(VectorType::get(II->getType(), BundleWidth));
         Cost += TTI->getCostOfKeepingLiveOverCall(V);
@@ -3150,13 +3338,214 @@
   return Cost;
 }
 
-int BoUpSLP::getTreeCost() {
-  int Cost = 0;
+int BoUpSLP::getExtractOperationCost(ExternalUser &EU) {
+  unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
+
+  // Uses by ephemeral values are free (because the ephemeral value will be
+  // removed prior to code generation, and so the extraction will be
+  // removed as well).
+  if (EphValues.count(EU.User))
+    return 0;
+
+  // If we plan to rewrite the tree in a smaller type, we will need to sign
+  // extend the extracted value back to the original type. Here, we account
+  // for the extract and the added cost of the sign extend if needed.
+  auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
+  auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
+
+  if (MinBWs.count(ScalarRoot)) {
+    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
+    auto Extend =
+        MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
+    VecTy = VectorType::get(MinTy, BundleWidth);
+    return (TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), VecTy,
+                                          EU.Lane));
+  }
+  return TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
+}
+
+int BoUpSLP::getExtractCost() {
+  int ExtractCost = 0;
+  SmallPtrSet<Value *, 16> ExtractCostCalculated;
+  for (ExternalUser &EU : ExternalUses) {
+    // We only add extract cost once for the same scalar.
+    if (!ExtractCostCalculated.insert(EU.Scalar).second)
+      continue;
+
+    // Avoid non-vectorized scalars.
+    if (!ScalarsToVec.count(EU.Scalar)) {
+      // Consider the possibility of extracting vectorized
+      // values for use by canceled elements.
+      if (InternalTreeUses.find(EU.Scalar) != InternalTreeUses.end())
+        for (ExternalUser &IU : InternalTreeUses[EU.Scalar])
+          ExtractCost += getExtractOperationCost(IU);
+      continue;
+    }
+
+    ExtractCost += getExtractOperationCost(EU);
+  }
+  return ExtractCost;
+}
+
+int BoUpSLP::getInsertCost() {
+  int InsertCost = 0;
+  for (unsigned I = 0; I < VectorizableTree.size(); I++) {
+    TreeEntry *Entry = VectorizableTree[I].get();
+    if (Entry->NeedToGather)
+      continue;
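+    // Each operand of a vectorized scalar that throttling moved back to
+    // scalar form will have to be gathered into a vector, so account for
+    // that gather cost here.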
+    for (Value *V : Entry->Scalars) {
+      auto *Inst = cast<Instruction>(V);
+      for (Use &U : Inst->operands()) {
+        Value *Op = U.get();
+        if (VecToScalars.count(Op))
+          InsertCost += getGatherCost(Op);
+      }
+    }
+  }
+  return InsertCost;
+}
+
+void BoUpSLP::treeTraversal(SmallVectorImpl<unsigned> &Leaves,
+                            SmallVectorImpl<unsigned> &Branches) {
+  std::vector<unsigned> Worklist;
+  SmallSet<unsigned, 8> Visited;
+
+  Worklist.push_back(0);
+  // Traverse the non-gathering nodes of the SLP graph with a worklist;
+  // since the graph might have cycles, we have to mark each visited node.
+  Visited.insert(0);
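+  // Record each node's non-gathering predecessor so that findSubTree can
+  // later walk paths from leaves back to the root.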
+  do {
+    unsigned Current = Worklist.back();
+    Worklist.pop_back();
+    int NonGatherUse = 0;
+    for (unsigned Next : UseTreeIndices[Current]) {
+      // Ignore any single node connections.
+      if (Next == Current)
+        continue;
+      if (VectorizableTree[Next]->NeedToGather)
+        continue;
+      // To simplify the algorithm, ignore any cycles.
+      if (!Visited.count(Next)) {
+        NonGatherUse++;
+        Worklist.push_back(Next);
+        Visited.insert(Next);
+        VectorizableTree[Next]->NonGatherPred = Current;
+        if (NonGatherUse > 1)
+          Branches.push_back(Current);
+      }
+    }
+    // Mark this node as a leaf if it does not have any
+    // non-gathering uses.
+    if (!NonGatherUse)
+      Leaves.push_back(Current);
+  } while (!Worklist.empty());
+}
+
+int BoUpSLP::cutPath(int &Cost, SmallVectorImpl<unsigned> &Path,
+                     SmallVectorImpl<unsigned> &VecNodes) {
+  do {
+    unsigned N = Path.back();
+    Path.pop_back();
+    auto I = std::find(VecNodes.begin(), VecNodes.end(), N);
+    assert(I != VecNodes.end() && "Wrong node");
+    VecNodes.erase(I);
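+    // Roll back this node: drop its cost from the running total and move
+    // its scalars back to scalar form.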
+    Cost -= VectorizableTree[N]->Cost;
+    for (Value *V : VectorizableTree[N]->Scalars) {
+      ScalarsToVec.erase(V);
+      VecToScalars.insert(V);
+    }
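+    // Recompute the complete cost of the remaining subtree, including the
+    // overhead of moving values between vector and scalar form.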
+    int PartialCost = Cost;
+    int ExtractCost = getExtractCost();
+    int SpillCost = getSpillCost();
+    int InsertCost = getInsertCost();
+    PartialCost += ExtractCost + SpillCost + InsertCost;
+    if (PartialCost < -SLPCostThreshold && cutTree(VecNodes))
+      return PartialCost;
+  } while (Path.size() > 0);
+  return INT_MAX;
+}
+
+int BoUpSLP::findSubTree(int Cost) {
+  SmallVector<unsigned, 4> VecNodes;
+  SmallVector<unsigned, 1> Leaves;
+  SmallVector<unsigned, 1> Branches;
+  SmallDenseMap<unsigned, SmallVector<unsigned, 1>> LeavesFromBranch;
+
+  treeTraversal(Leaves, Branches);
+  std::reverse(Leaves.begin(), Leaves.end());
+  // For all tree nodes that are branches, record the leaves that are
+  // reachable from each particular branch.
+  for (unsigned N = 0, E = VectorizableTree.size(); N < E; ++N) {
+    if (VectorizableTree[N]->NeedToGather)
+      continue;
+    VecNodes.push_back(N);
+    if (is_contained(Leaves, N)) {
+      unsigned Idx = N;
+      do {
+        Idx = VectorizableTree[Idx]->NonGatherPred;
+        if (is_contained(Branches, Idx))
+          LeavesFromBranch[Idx].push_back(N);
+      } while (Idx != 0);
+    }
+  }
+
+  // Walk across all leaves in order to reduce their paths. If we encounter
+  // another branch on the way from our leaf, then consider the path from the
+  // given leaf to end at this branch, and arrange all leaves reachable from
+  // the branch to be considered after this branch.
+  do {
+    // Stop if we are over our budget of maximum cost calculations.
+    if (CostsRecalculations >= MaxCostsRecalculations)
+      break;
+
+    unsigned L = Leaves.back();
+    Leaves.pop_back();
+    SmallVector<unsigned, 2> Path;
+    Path.push_back(L);
+    unsigned Node = L;
+
+    do {
+      Node = VectorizableTree[Node]->NonGatherPred;
+      bool FoundLeaf = false;
+      if (is_contained(Branches, Node)) {
+        for (unsigned L1 : LeavesFromBranch[Node]) {
+          if (L == L1)
+            continue;
+          auto I = std::find(Leaves.begin(), Leaves.end(), L1);
+          if (I == Leaves.end())
+            continue;
+          // Move found leaf to be processed next in the queue of leaves.
+          Leaves.erase(I);
+          Leaves.push_back(L1);
+          FoundLeaf = true;
+        }
+      }
+      // Only once we are done with all paths reachable from this branch to
+      // leaves do we consider the nodes below it, down to another branch or
+      // the root node.
+      if (FoundLeaf)
+        break;
+      if (!is_contained(Path, Node))
+        Path.push_back(Node);
+      if (!Node)
+        break;
+    } while (true);
+
+    std::reverse(Path.begin(), Path.end());
+    CostsRecalculations += Path.size();
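+    // Try to cut the nodes of this path away from the tree; stop as soon
+    // as the remaining subtree becomes profitable.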
+
+    int PartialCost = cutPath(Cost, Path, VecNodes);
+    if (PartialCost != INT_MAX)
+      return PartialCost;
+  } while (Leaves.size() > 0);
+
+  return INT_MAX;
+}
+
+int BoUpSLP::getTreeCost(bool CutTree) {
+  int CostSum = 0;
   LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
                     << VectorizableTree.size() << ".\n");
 
-  unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
-
   for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
     TreeEntry &TE = *VectorizableTree[I].get();
 
@@ -3180,53 +3569,58 @@
             }))
       continue;
 
-    int C = getEntryCost(&TE);
-    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
+    for (auto &UserTreeIdx : TE.UserTreeIndices)
+      UseTreeIndices[UserTreeIdx.UserTE->Idx].push_back(I);
+
+    if (!TE.NeedToGather) {
+      for (Value *V : TE.Scalars)
+        ScalarsToVec.insert(V);
+    }
+
+    VectorizableTree[I]->Cost = getEntryCost(&TE);
+    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorizableTree[I]->Cost
                       << " for bundle that starts with " << *TE.Scalars[0]
                       << ".\n");
-    Cost += C;
+    CostSum += VectorizableTree[I]->Cost;
   }
 
-  SmallPtrSet<Value *, 16> ExtractCostCalculated;
   int ExtractCost = 0;
-  for (ExternalUser &EU : ExternalUses) {
-    // We only add extract cost once for the same scalar.
-    if (!ExtractCostCalculated.insert(EU.Scalar).second)
-      continue;
-
-    // Uses by ephemeral values are free (because the ephemeral value will be
-    // removed prior to code generation, and so the extraction will be
-    // removed as well).
-    if (EphValues.count(EU.User))
-      continue;
-
-    // If we plan to rewrite the tree in a smaller type, we will need to sign
-    // extend the extracted value back to the original type. Here, we account
-    // for the extract and the added cost of the sign extend if needed.
-    auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
-    auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
-    if (MinBWs.count(ScalarRoot)) {
-      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
-      auto Extend =
-          MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
-      VecTy = VectorType::get(MinTy, BundleWidth);
-      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
-                                                   VecTy, EU.Lane);
-    } else {
-      ExtractCost +=
-          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
-    }
-  }
+  int SpillCost = 0;
+  int Cost = CostSum;
 
-  int SpillCost = getSpillCost();
-  Cost += SpillCost + ExtractCost;
+  ExtractCost = getExtractCost();
+  Cost += ExtractCost;
+  SpillCost = getSpillCost();
+  Cost += SpillCost;
 
   std::string Str;
-  {
-    raw_string_ostream OS(Str);
-    OS << "SLP: Spill Cost = " << SpillCost << ".\n"
-       << "SLP: Extract Cost = " << ExtractCost << ".\n"
-       << "SLP: Total Cost = " << Cost << ".\n";
+  if (CutTree && Cost >= -SLPCostThreshold) {
+    for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
+      TreeEntry &TE = *VectorizableTree[I].get();
+      if (TE.NeedToGather)
+        continue;
+      int GatherCost = 0;
+      auto It = UseTreeIndices.find(I);
+      if (It != UseTreeIndices.end())
+        for (int Gather : It->second)
+          if (VectorizableTree[Gather]->NeedToGather)
+            GatherCost += VectorizableTree[Gather]->Cost;
+      TE.Cost = TE.Cost + GatherCost;
+    }
+    int PartialCost = findSubTree(CostSum);
+    if (PartialCost != INT_MAX) {
+      raw_string_ostream OS(Str);
+      OS << "SLP: Partial vectorization with Total Cost = " << PartialCost
+         << ".\n";
+      Cost = PartialCost;
+    }
+  } else {
+    raw_string_ostream OS(Str);
+    OS << "SLP: Spill Cost = " << SpillCost << ".\n"
+       << "SLP: Extract Cost = " << ExtractCost << ".\n"
+       << "SLP: Total Cost = " << Cost << ".\n";
   }
   LLVM_DEBUG(dbgs() << Str);
 
@@ -3948,7 +4342,12 @@
 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
   // All blocks must be scheduled before any instructions are inserted.
   for (auto &BSIter : BlocksSchedules) {
-    scheduleBlock(BSIter.second.get());
+    BlockScheduling *BS = BSIter.second.get();
+    // Remove all ScheduleData from the nodes for which we have changed the
+    // vectorization decision.
+    if (RemovedOperations.size())
+      removeFromScheduling(BS);
+    scheduleBlock(BS);
   }
 
   Builder.SetInsertPoint(&F->getEntryBlock().front());
@@ -4077,13 +4476,16 @@
       Type *Ty = Scalar->getType();
       if (!Ty->isVoidTy()) {
 #ifndef NDEBUG
-        for (User *U : Scalar->users()) {
-          LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
-
-          // It is legal to replace users in the ignorelist by undef.
-          assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
-                 "Replacing out-of-tree value with undef");
-        }
+        // The tree might not be fully vectorized, so we cannot validate
+        // every user here.
+        if (!RemovedOperations.size())
+          for (User *U : Scalar->users()) {
+            LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
+
+            // It is legal to replace users in the ignorelist by undef.
+            assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
+                   "Replacing out-of-tree value with undef");
+          }
 #endif
         Value *Undef = UndefValue::get(Ty);
         Scalar->replaceAllUsesWith(Undef);
@@ -4560,6 +4962,32 @@
   ReadyInsts.clear();
 }
 
+void BoUpSLP::removeFromScheduling(BlockScheduling *BS) {
+  bool Removed = false;
+  for (int I : RemovedOperations) {
+    TreeEntry *Entry = VectorizableTree[I].get();
+    ScheduleData *SD = BS->getScheduleData(Entry->Scalars[0]);
+    if (SD && SD->isPartOfBundle()) {
+      if (!Removed) {
+        Removed = true;
+        BS->resetSchedule();
+      }
+      BS->cancelScheduling(Entry->Scalars, SD->OpValue);
+    }
+  }
+  if (!Removed)
+    return;
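+  // Some bundles were canceled, so scheduling has to start over: refill the
+  // ready list and clear the stale dependencies of every remaining
+  // ScheduleData.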
+  BS->resetSchedule();
+  BS->initialFillReadyList(BS->ReadyInsts);
+  for (Instruction *I = BS->ScheduleStart; I != BS->ScheduleEnd;
+       I = I->getNextNode()) {
+    if (BS->ScheduleDataMap.find(I) == BS->ScheduleDataMap.end())
+      continue;
+    BS->doForAllOpcodes(I,
+                        [&](ScheduleData *SD) { SD->clearDependencies(); });
+  }
+}
+
 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
   if (!BS->ScheduleStart)
     return;
@@ -5028,6 +5456,12 @@
                         << " underlying objects.\n");
       Changed |= vectorizeGEPIndices(BB, R);
     }
+
+    // Partially vectorize trees only after all full vectorization is done;
+    // otherwise, we could prevent a more profitable full vectorization with
+    // smaller vector sizes.
+    if (SLPThrottling)
+      Changed |= R.tryPartialVectorization();
   }
 
   if (Changed) {
@@ -5103,6 +5537,8 @@
       // Move to the next bundle.
       i += VF - 1;
       Changed = true;
+    } else {
+      R.recordSeeds(Operands);
     }
   }
 
@@ -5332,6 +5768,8 @@
         I += VF - 1;
         NextInst = I + 1;
         Changed = true;
+      } else {
+        R.recordSeeds(Ops);
       }
     }
   }
Index: test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
===================================================================
--- test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
+++ test/Transforms/SLPVectorizer/AArch64/ext-trunc.ll
@@ -12,21 +12,18 @@
 ; CHECK-NEXT:    [[Z0:%.*]] = zext <4 x i16> [[A:%.*]] to <4 x i32>
 ; CHECK-NEXT:    [[Z1:%.*]] = zext <4 x i16> [[B:%.*]] to <4 x i32>
 ; CHECK-NEXT:    [[SUB0:%.*]] = sub <4 x i32> [[Z0]], [[Z1]]
-; CHECK-NEXT:    [[E0:%.*]] = extractelement <4 x i32> [[SUB0]], i32 0
-; CHECK-NEXT:    [[S0:%.*]] = sext i32 [[E0]] to i64
-; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i64 [[S0]]
+; CHECK-NEXT:    [[TMP0:%.*]] = sext <4 x i32> [[SUB0]] to <4 x i64>
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
+; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i64 [[TMP1]]
 ; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, i64* [[GEP0]]
-; CHECK-NEXT:    [[E1:%.*]] = extractelement <4 x i32> [[SUB0]], i32 1
-; CHECK-NEXT:    [[S1:%.*]] = sext i32 [[E1]] to i64
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[S1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP2]]
 ; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[GEP1]]
-; CHECK-NEXT:    [[E2:%.*]] = extractelement <4 x i32> [[SUB0]], i32 2
-; CHECK-NEXT:    [[S2:%.*]] = sext i32 [[E2]] to i64
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[S2]]
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP3]]
 ; CHECK-NEXT:    [[LOAD2:%.*]] = load i64, i64* [[GEP2]]
-; CHECK-NEXT:    [[E3:%.*]] = extractelement <4 x i32> [[SUB0]], i32 3
-; CHECK-NEXT:    [[S3:%.*]] = sext i32 [[E3]] to i64
-; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[S3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3
+; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP4]]
 ; CHECK-NEXT:    [[LOAD3:%.*]] = load i64, i64* [[GEP3]]
 ; CHECK-NEXT:    call void @foo(i64 [[LOAD0]], i64 [[LOAD1]], i64 [[LOAD2]], i64 [[LOAD3]])
 ; CHECK-NEXT:    ret void
Index: test/Transforms/SLPVectorizer/AArch64/horizontal.ll
===================================================================
--- test/Transforms/SLPVectorizer/AArch64/horizontal.ll
+++ test/Transforms/SLPVectorizer/AArch64/horizontal.ll
@@ -28,39 +28,42 @@
 ; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[LX:%.*]] to i64
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[S_026:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[OP_EXTRA:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[J_025:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[P2_024:%.*]] = phi i32* [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR29:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[P1_023:%.*]] = phi i32* [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x i32> [ zeroinitializer, [[FOR_BODY_LR_PH]] ], [ [[TMP13:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 1
 ; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 1
 ; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 2
 ; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 2
 ; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 3
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P1_023]] to <4 x i32>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[P1_023]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 3
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[P2_024]] to <4 x i32>*
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer
-; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP4]]
-; CHECK-NEXT:    [[TMP7:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP4]]
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 undef, [[S_026]]
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[P2_024]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp slt <4 x i32> [[TMP5]], zeroinitializer
+; CHECK-NEXT:    [[TMP7:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP5]]
+; CHECK-NEXT:    [[TMP8:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> [[TMP5]]
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 undef, [[TMP9]]
 ; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[ADD]], undef
 ; CHECK-NEXT:    [[ADD19:%.*]] = add nsw i32 [[ADD11]], undef
-; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> [[TMP7]])
-; CHECK-NEXT:    [[OP_EXTRA]] = add nsw i32 [[TMP8]], [[S_026]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> [[TMP8]])
 ; CHECK-NEXT:    [[ADD27:%.*]] = add nsw i32 [[ADD19]], undef
 ; CHECK-NEXT:    [[ADD_PTR]] = getelementptr inbounds i32, i32* [[P1_023]], i64 [[IDX_EXT]]
 ; CHECK-NEXT:    [[ADD_PTR29]] = getelementptr inbounds i32, i32* [[P2_024]], i64 [[IDX_EXT]]
-; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[J_025]], 1
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[H]]
+; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x i32> undef, i32 [[TMP10]], i32 0
+; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <2 x i32> [[TMP11]], i32 1, i32 1
+; CHECK-NEXT:    [[TMP13]] = add nsw <2 x i32> [[TMP12]], [[TMP0]]
+; CHECK-NEXT:    [[TMP14:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[TMP14]], [[H]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
 ; CHECK:       for.end.loopexit:
+; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0
 ; CHECK-NEXT:    br label [[FOR_END]]
 ; CHECK:       for.end:
-; CHECK-NEXT:    [[S_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_EXTRA]], [[FOR_END_LOOPEXIT]] ]
+; CHECK-NEXT:    [[S_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP15]], [[FOR_END_LOOPEXIT]] ]
 ; CHECK-NEXT:    ret i32 [[S_0_LCSSA]]
 ;
 entry:
Index: test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
+++ test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
@@ -12,38 +12,36 @@
 ; SSE:       for.body:
 ; SSE-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; SSE-NEXT:    [[ACC1_056:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD13:%.*]], [[FOR_BODY]] ]
-; SSE-NEXT:    [[S1_055:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[COND_I40:%.*]], [[FOR_BODY]] ]
-; SSE-NEXT:    [[S0_054:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[COND_I44:%.*]], [[FOR_BODY]] ]
+; SSE-NEXT:    [[TMP0:%.*]] = phi <2 x float> [ zeroinitializer, [[ENTRY]] ], [ [[TMP18:%.*]], [[FOR_BODY]] ]
 ; SSE-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 [[INDVARS_IV]]
-; SSE-NEXT:    [[TMP0:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; SSE-NEXT:    [[TMP1:%.*]] = load float, float* [[ARRAYIDX]], align 4
 ; SSE-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; SSE-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[DEST:%.*]], i64 [[INDVARS_IV]]
 ; SSE-NEXT:    store float [[ACC1_056]], float* [[ARRAYIDX2]], align 4
-; SSE-NEXT:    [[ADD:%.*]] = fadd float [[S0_054]], [[TMP0]]
-; SSE-NEXT:    [[ADD3:%.*]] = fadd float [[S1_055]], [[TMP0]]
-; SSE-NEXT:    [[MUL:%.*]] = fmul float [[S0_054]], 0.000000e+00
-; SSE-NEXT:    [[ADD4:%.*]] = fadd float [[MUL]], [[ADD3]]
-; SSE-NEXT:    [[MUL5:%.*]] = fmul float [[S1_055]], 0.000000e+00
-; SSE-NEXT:    [[ADD6:%.*]] = fadd float [[MUL5]], [[ADD]]
-; SSE-NEXT:    [[CMP_I:%.*]] = fcmp olt float [[ADD6]], 1.000000e+00
-; SSE-NEXT:    [[COND_I:%.*]] = select i1 [[CMP_I]], float [[ADD6]], float 1.000000e+00
-; SSE-NEXT:    [[CMP_I51:%.*]] = fcmp olt float [[COND_I]], -1.000000e+00
-; SSE-NEXT:    [[CMP_I49:%.*]] = fcmp olt float [[ADD4]], 1.000000e+00
-; SSE-NEXT:    [[COND_I50:%.*]] = select i1 [[CMP_I49]], float [[ADD4]], float 1.000000e+00
-; SSE-NEXT:    [[CMP_I47:%.*]] = fcmp olt float [[COND_I50]], -1.000000e+00
-; SSE-NEXT:    [[COND_I_OP:%.*]] = fmul float [[COND_I]], 0.000000e+00
-; SSE-NEXT:    [[MUL10:%.*]] = select i1 [[CMP_I51]], float -0.000000e+00, float [[COND_I_OP]]
-; SSE-NEXT:    [[COND_I50_OP:%.*]] = fmul float [[COND_I50]], 0.000000e+00
-; SSE-NEXT:    [[MUL11:%.*]] = select i1 [[CMP_I47]], float -0.000000e+00, float [[COND_I50_OP]]
-; SSE-NEXT:    [[ADD13]] = fadd float [[MUL10]], [[MUL11]]
+; SSE-NEXT:    [[TMP2:%.*]] = extractelement <2 x float> [[TMP0]], i32 1
+; SSE-NEXT:    [[ADD:%.*]] = fadd float [[TMP2]], [[TMP1]]
+; SSE-NEXT:    [[TMP3:%.*]] = extractelement <2 x float> [[TMP0]], i32 0
+; SSE-NEXT:    [[ADD3:%.*]] = fadd float [[TMP3]], [[TMP1]]
+; SSE-NEXT:    [[TMP4:%.*]] = fmul <2 x float> [[TMP0]], zeroinitializer
+; SSE-NEXT:    [[TMP5:%.*]] = insertelement <2 x float> undef, float [[ADD]], i32 0
+; SSE-NEXT:    [[TMP6:%.*]] = insertelement <2 x float> [[TMP5]], float [[ADD3]], i32 1
+; SSE-NEXT:    [[TMP7:%.*]] = fadd <2 x float> [[TMP4]], [[TMP6]]
+; SSE-NEXT:    [[TMP8:%.*]] = fcmp olt <2 x float> [[TMP7]], <float 1.000000e+00, float 1.000000e+00>
+; SSE-NEXT:    [[TMP9:%.*]] = select <2 x i1> [[TMP8]], <2 x float> [[TMP7]], <2 x float> <float 1.000000e+00, float 1.000000e+00>
+; SSE-NEXT:    [[TMP10:%.*]] = fcmp olt <2 x float> [[TMP9]], <float -1.000000e+00, float -1.000000e+00>
+; SSE-NEXT:    [[TMP11:%.*]] = fmul <2 x float> [[TMP9]], zeroinitializer
+; SSE-NEXT:    [[TMP12:%.*]] = select <2 x i1> [[TMP10]], <2 x float> <float -0.000000e+00, float -0.000000e+00>, <2 x float> [[TMP11]]
+; SSE-NEXT:    [[TMP13:%.*]] = extractelement <2 x float> [[TMP12]], i32 0
+; SSE-NEXT:    [[TMP14:%.*]] = extractelement <2 x float> [[TMP12]], i32 1
+; SSE-NEXT:    [[ADD13]] = fadd float [[TMP13]], [[TMP14]]
 ; SSE-NEXT:    [[CMP_I45:%.*]] = fcmp olt float [[ADD13]], 1.000000e+00
 ; SSE-NEXT:    [[COND_I46:%.*]] = select i1 [[CMP_I45]], float [[ADD13]], float 1.000000e+00
-; SSE-NEXT:    [[CMP_I43:%.*]] = fcmp olt float [[COND_I46]], -1.000000e+00
-; SSE-NEXT:    [[COND_I44]] = select i1 [[CMP_I43]], float -1.000000e+00, float [[COND_I46]]
-; SSE-NEXT:    [[CMP_I41:%.*]] = fcmp olt float [[MUL11]], 1.000000e+00
-; SSE-NEXT:    [[COND_I42:%.*]] = select i1 [[CMP_I41]], float [[MUL11]], float 1.000000e+00
-; SSE-NEXT:    [[CMP_I39:%.*]] = fcmp olt float [[COND_I42]], -1.000000e+00
-; SSE-NEXT:    [[COND_I40]] = select i1 [[CMP_I39]], float -1.000000e+00, float [[COND_I42]]
+; SSE-NEXT:    [[CMP_I41:%.*]] = fcmp olt float [[TMP14]], 1.000000e+00
+; SSE-NEXT:    [[COND_I42:%.*]] = select i1 [[CMP_I41]], float [[TMP14]], float 1.000000e+00
+; SSE-NEXT:    [[TMP15:%.*]] = insertelement <2 x float> undef, float [[COND_I42]], i32 0
+; SSE-NEXT:    [[TMP16:%.*]] = insertelement <2 x float> [[TMP15]], float [[COND_I46]], i32 1
+; SSE-NEXT:    [[TMP17:%.*]] = fcmp olt <2 x float> [[TMP16]], <float -1.000000e+00, float -1.000000e+00>
+; SSE-NEXT:    [[TMP18]] = select <2 x i1> [[TMP17]], <2 x float> <float -1.000000e+00, float -1.000000e+00>, <2 x float> [[TMP16]]
 ; SSE-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 32
 ; SSE-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
 ; SSE:       for.end:
Index: test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
+++ test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
@@ -15,18 +15,20 @@
 ; CHECK:       for.body6:
 ; CHECK-NEXT:    br label [[FOR_BODY12:%.*]]
 ; CHECK:       for.body12:
-; CHECK-NEXT:    [[FZIMG_069:%.*]] = phi double [ undef, [[FOR_BODY6]] ], [ [[ADD19:%.*]], [[IF_END:%.*]] ]
-; CHECK-NEXT:    [[FZREAL_068:%.*]] = phi double [ undef, [[FOR_BODY6]] ], [ [[ADD20:%.*]], [[IF_END]] ]
-; CHECK-NEXT:    [[MUL13:%.*]] = fmul double [[FZREAL_068]], [[FZREAL_068]]
-; CHECK-NEXT:    [[MUL14:%.*]] = fmul double [[FZIMG_069]], [[FZIMG_069]]
-; CHECK-NEXT:    [[ADD15:%.*]] = fadd double [[MUL13]], [[MUL14]]
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x double> [ undef, [[FOR_BODY6]] ], [ [[TMP7:%.*]], [[IF_END:%.*]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul <2 x double> [[TMP0]], [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
+; CHECK-NEXT:    [[ADD15:%.*]] = fadd double [[TMP2]], [[TMP3]]
 ; CHECK-NEXT:    [[CMP16:%.*]] = fcmp ogt double [[ADD15]], 4.000000e+00
 ; CHECK-NEXT:    br i1 [[CMP16]], label [[FOR_INC21:%.*]], label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    [[MUL18:%.*]] = fmul double undef, [[FZIMG_069]]
-; CHECK-NEXT:    [[ADD19]] = fadd double undef, [[MUL18]]
-; CHECK-NEXT:    [[SUB:%.*]] = fsub double [[MUL13]], [[MUL14]]
-; CHECK-NEXT:    [[ADD20]] = fadd double undef, [[SUB]]
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x double> [[TMP0]], i32 1
+; CHECK-NEXT:    [[MUL18:%.*]] = fmul double undef, [[TMP4]]
+; CHECK-NEXT:    [[SUB:%.*]] = fsub double [[TMP2]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> undef, double [[SUB]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> [[TMP5]], double [[MUL18]], i32 1
+; CHECK-NEXT:    [[TMP7]] = fadd <2 x double> undef, [[TMP6]]
 ; CHECK-NEXT:    br i1 undef, label [[FOR_BODY12]], label [[FOR_INC21]]
 ; CHECK:       for.inc21:
 ; CHECK-NEXT:    br i1 undef, label [[FOR_END23:%.*]], label [[FOR_BODY6]]
Index: test/Transforms/SLPVectorizer/X86/lookahead.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/lookahead.ll
+++ test/Transforms/SLPVectorizer/X86/lookahead.ll
@@ -30,14 +30,18 @@
 ; CHECK-NEXT:    [[C_1:%.*]] = load double, double* [[IDX5]], align 8
 ; CHECK-NEXT:    [[D_0:%.*]] = load double, double* [[IDX6]], align 8
 ; CHECK-NEXT:    [[D_1:%.*]] = load double, double* [[IDX7]], align 8
-; CHECK-NEXT:    [[SUBAB_0:%.*]] = fsub fast double [[A_0]], [[B_0]]
 ; CHECK-NEXT:    [[SUBCD_0:%.*]] = fsub fast double [[C_0]], [[D_0]]
 ; CHECK-NEXT:    [[SUBAB_1:%.*]] = fsub fast double [[A_1]], [[B_1]]
-; CHECK-NEXT:    [[SUBCD_1:%.*]] = fsub fast double [[C_1]], [[D_1]]
-; CHECK-NEXT:    [[ADDABCD_0:%.*]] = fadd fast double [[SUBAB_0]], [[SUBCD_0]]
-; CHECK-NEXT:    [[ADDCDAB_1:%.*]] = fadd fast double [[SUBCD_1]], [[SUBAB_1]]
-; CHECK-NEXT:    store double [[ADDABCD_0]], double* [[IDX0]], align 8
-; CHECK-NEXT:    store double [[ADDCDAB_1]], double* [[IDX1]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[A_0]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[C_1]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[B_0]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[D_1]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = fsub fast <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> undef, double [[SUBCD_0]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> [[TMP5]], double [[SUBAB_1]], i32 1
+; CHECK-NEXT:    [[TMP7:%.*]] = fadd fast <2 x double> [[TMP4]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast double* [[IDX0]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP7]], <2 x double>* [[TMP8]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
Index: test/Transforms/SLPVectorizer/X86/slp-throttle.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/slp-throttle.ll
+++ test/Transforms/SLPVectorizer/X86/slp-throttle.ll
@@ -5,18 +5,20 @@
 ; CHECK-LABEL: @rftbsub(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 2
-; CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[ARRAYIDX6]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = or i64 2, 1
-; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load double, double* [[ARRAYIDX12]], align 8
-; CHECK-NEXT:    [[ADD16:%.*]] = fadd double [[TMP2]], undef
+; CHECK-NEXT:    [[TMP0:%.*]] = or i64 2, 1
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[ARRAYIDX6]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP2]], i32 1
+; CHECK-NEXT:    [[ADD16:%.*]] = fadd double [[TMP3]], undef
 ; CHECK-NEXT:    [[MUL18:%.*]] = fmul double undef, [[ADD16]]
 ; CHECK-NEXT:    [[ADD19:%.*]] = fadd double undef, [[MUL18]]
 ; CHECK-NEXT:    [[SUB22:%.*]] = fsub double undef, undef
-; CHECK-NEXT:    [[SUB25:%.*]] = fsub double [[TMP0]], [[ADD19]]
-; CHECK-NEXT:    store double [[SUB25]], double* [[ARRAYIDX6]], align 8
-; CHECK-NEXT:    [[SUB29:%.*]] = fsub double [[TMP2]], [[SUB22]]
-; CHECK-NEXT:    store double [[SUB29]], double* [[ARRAYIDX12]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> undef, double [[ADD19]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> [[TMP4]], double [[SUB22]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = fsub <2 x double> [[TMP2]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast double* [[ARRAYIDX6]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP6]], <2 x double>* [[TMP7]], align 8
 ; CHECK-NEXT:    unreachable
 ;
 entry: