diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -24,7 +24,7 @@
 #include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Register.h"
 #include "llvm/CodeGen/ScheduleDAG.h"
 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
 #include "llvm/CodeGen/SchedulerRegistry.h"
@@ -166,33 +166,32 @@
   /// that are "live". These nodes must be scheduled before any other nodes that
-  /// modifies the registers can be scheduled.
+  /// modify the registers can be scheduled.
   unsigned NumLiveRegs;
-  std::unique_ptr<SUnit*[]> LiveRegDefs;
-  std::unique_ptr<SUnit*[]> LiveRegGens;
+  std::unique_ptr<SUnit *[]> LiveRegDefs;
+  std::unique_ptr<SUnit *[]> LiveRegGens;
 
   // Collect interferences between physical register use/defs.
   // Each interference is an SUnit and set of physical registers.
-  SmallVector<SUnit*, 4> Interferences;
+  SmallVector<SUnit *, 4> Interferences;
 
   using LRegsMapT = DenseMap<SUnit *, SmallVector<unsigned, 4>>;
 
   LRegsMapT LRegsMap;
 
-  /// Topo - A topological ordering for SUnits which permits fast IsReachable
+  /// Topo - A topological ordering for SUnits which permits fast isReachable
   /// and similar queries.
   ScheduleDAGTopologicalSort Topo;
 
-  // Hack to keep track of the inverse of FindCallSeqStart without more crazy
+  // Hack to keep track of the inverse of findCallSeqStart without more crazy
   // DAG crawling.
-  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;
+  DenseMap<SUnit *, SUnit *> CallSeqEndForStart;
 
 public:
-  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
-                    SchedulingPriorityQueue *availqueue,
+  ScheduleDAGRRList(MachineFunction &MF, bool NeedLatency,
+                    SchedulingPriorityQueue *AvailableQueue,
                     CodeGenOpt::Level OptLevel)
-    : ScheduleDAGSDNodes(mf),
-      NeedLatency(needlatency), AvailableQueue(availqueue),
-      Topo(SUnits, nullptr) {
-    const TargetSubtargetInfo &STI = mf.getSubtarget();
+      : ScheduleDAGSDNodes(MF), NeedLatency(NeedLatency),
+        AvailableQueue(AvailableQueue), Topo(SUnits, nullptr) {
+    const TargetSubtargetInfo &STI = MF.getSubtarget();
     if (DisableSchedCycles || !NeedLatency)
       HazardRec = new ScheduleHazardRecognizer();
     else
@@ -208,37 +207,37 @@
 
   ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }
 
-  /// IsReachable - Checks if SU is reachable from TargetSU.
-  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
+  /// isReachable - Checks if SU is reachable from TargetSU.
+  bool isReachable(const SUnit *SU, const SUnit *TargetSU) {
     return Topo.IsReachable(SU, TargetSU);
   }
 
-  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
+  /// willCreateCycle - Returns true if adding an edge from SU to TargetSU will
   /// create a cycle.
-  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
+  bool willCreateCycle(SUnit *SU, SUnit *TargetSU) {
     return Topo.WillCreateCycle(SU, TargetSU);
   }
 
-  /// AddPredQueued - Queues and update to add a predecessor edge to SUnit SU.
+  /// addPredQueued - Queues an update to add a predecessor edge to SUnit SU.
   /// This returns true if this is a new predecessor.
   /// Does *NOT* update the topological ordering! It just queues an update.
-  void AddPredQueued(SUnit *SU, const SDep &D) {
+  void addPredQueued(SUnit *SU, const SDep &D) {
     Topo.AddPredQueued(SU, D.getSUnit());
     SU->addPred(D);
   }
 
-  /// AddPred - adds a predecessor edge to SUnit SU.
+  /// addPred - Adds a predecessor edge to SUnit SU.
   /// This returns true if this is a new predecessor.
   /// Updates the topological ordering if required.
-  void AddPred(SUnit *SU, const SDep &D) {
+  void addPred(SUnit *SU, const SDep &D) {
     Topo.AddPred(SU, D.getSUnit());
     SU->addPred(D);
   }
 
-  /// RemovePred - removes a predecessor edge from SUnit SU.
+  /// removePred - Removes a predecessor edge from SUnit SU.
   /// This returns true if an edge was removed.
   /// Updates the topological ordering if required.
-  void RemovePred(SUnit *SU, const SDep &D) {
+  void removePred(SUnit *SU, const SDep &D) {
     Topo.RemovePred(SU, D.getSUnit());
     SU->removePred(D);
   }
@@ -246,35 +245,34 @@
 private:
   bool isReady(SUnit *SU) {
     return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
-      AvailableQueue->isReady(SU);
-  }
-
-  void ReleasePred(SUnit *SU, const SDep *PredEdge);
-  void ReleasePredecessors(SUnit *SU);
-  void ReleasePending();
-  void AdvanceToCycle(unsigned NextCycle);
-  void AdvancePastStalls(SUnit *SU);
-  void EmitNode(SUnit *SU);
-  void ScheduleNodeBottomUp(SUnit*);
-  void CapturePred(SDep *PredEdge);
-  void UnscheduleNodeBottomUp(SUnit*);
-  void RestoreHazardCheckerBottomUp();
-  void BacktrackBottomUp(SUnit*, SUnit*);
-  SUnit *TryUnfoldSU(SUnit *);
-  SUnit *CopyAndMoveSuccessors(SUnit*);
-  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
-                                const TargetRegisterClass*,
-                                const TargetRegisterClass*,
-                                SmallVectorImpl<SUnit*>&);
-  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
+           AvailableQueue->isReady(SU);
+  }
+
+  void releasePred(SUnit *SU, const SDep *PredEdge);
+  void releasePredecessors(SUnit *SU);
+  void releasePending();
+  void advanceToCycle(unsigned NextCycle);
+  void advancePastStalls(SUnit *SU);
+  void emitNode(SUnit *SU);
+  void scheduleNodeBottomUp(SUnit *);
+  void capturePred(SDep *PredEdge);
+  void unscheduleNodeBottomUp(SUnit *);
+  void restoreHazardCheckerBottomUp();
+  void backtrackBottomUp(SUnit *, SUnit *);
+  SUnit *tryUnfoldSU(SUnit *);
+  SUnit *copyAndMoveSuccessors(SUnit *);
+  void insertCopiesAndMoveSuccs(SUnit *, unsigned, const TargetRegisterClass *,
+                                const TargetRegisterClass *,
+                                SmallVectorImpl<SUnit *> &);
+  bool delayForLiveRegsBottomUp(SUnit *, SmallVectorImpl<unsigned> &);
 
   void releaseInterferences(unsigned Reg = 0);
 
-  SUnit *PickNodeToScheduleBottomUp();
-  void ListScheduleBottomUp();
+  SUnit *pickNodeToScheduleBottomUp();
+  void listScheduleBottomUp();
 
-  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
-  SUnit *CreateNewSUnit(SDNode *N) {
+  /// createNewSUnit - Creates a new SUnit and returns a pointer to it.
+  SUnit *createNewSUnit(SDNode *N) {
     unsigned NumSUnits = SUnits.size();
     SUnit *NewNode = newSUnit(N);
     // Update the topological ordering.
@@ -283,8 +281,8 @@
     return NewNode;
   }
 
-  /// CreateClone - Creates a new SUnit from an existing one.
-  SUnit *CreateClone(SUnit *N) {
+  /// createClone - Creates a new SUnit from an existing one.
+  SUnit *createClone(SUnit *N) {
     unsigned NumSUnits = SUnits.size();
     SUnit *NewNode = Clone(N);
     // Update the topological ordering.
@@ -295,23 +293,19 @@
 
   /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
   /// need actual latency information but the hybrid scheduler does.
-  bool forceUnitLatencies() const override {
-    return !NeedLatency;
-  }
+  bool forceUnitLatencies() const override { return !NeedLatency; }
 };
 
-}  // end anonymous namespace
+} // end anonymous namespace
 
-/// GetCostForDef - Looks up the register class and cost for a given definition.
+/// getCostForDef - Looks up the register class and cost for a given definition.
 /// Typically this just means looking up the representative register class,
 /// but for untyped values (MVT::Untyped) it means inspecting the node's
 /// opcode to determine what register class is being generated.
-static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
-                          const TargetLowering *TLI,
-                          const TargetInstrInfo *TII,
-                          const TargetRegisterInfo *TRI,
-                          unsigned &RegClass, unsigned &Cost,
-                          const MachineFunction &MF) {
+static void getCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
+                          const TargetLowering *TLI, const TargetInstrInfo *TII,
+                          const TargetRegisterInfo *TRI, unsigned &RegClass,
+                          unsigned &Cost, const MachineFunction &MF) {
   MVT VT = RegDefPos.GetValue();
 
   // Special handling for untyped values.  These values can only come from
@@ -321,7 +315,7 @@
 
     // Special handling for CopyFromReg of untyped values.
     if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) {
-      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
+      Register Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
       const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
       RegClass = RC->getID();
       Cost = 1;
@@ -330,7 +324,8 @@
 
     unsigned Opcode = Node->getMachineOpcode();
     if (Opcode == TargetOpcode::REG_SEQUENCE) {
-      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
+      unsigned DstRCIdx =
+          cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
       const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
       RegClass = RC->getID();
       Cost = 1;
@@ -362,8 +357,8 @@
   NumLiveRegs = 0;
   // Allocate slots for each physical register, plus one for a special register
   // to track the virtual resource of a calling sequence.
-  LiveRegDefs.reset(new SUnit*[TRI->getNumRegs() + 1]());
-  LiveRegGens.reset(new SUnit*[TRI->getNumRegs() + 1]());
+  LiveRegDefs.reset(new SUnit *[TRI->getNumRegs() + 1]());
+  LiveRegGens.reset(new SUnit *[TRI->getNumRegs() + 1]());
   CallSeqEndForStart.clear();
   assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");
 
@@ -378,7 +373,7 @@
   HazardRec->Reset();
 
   // Execute the actual scheduling loop.
-  ListScheduleBottomUp();
+  listScheduleBottomUp();
 
   AvailableQueue->releaseState();
 
@@ -393,9 +388,9 @@
 //  Bottom-Up Scheduling
 //===----------------------------------------------------------------------===//
 
-/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
+/// releasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
-void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
+void ScheduleDAGRRList::releasePred(SUnit *SU, const SDep *PredEdge) {
   SUnit *PredSU = PredEdge->getSUnit();
 
 #ifndef NDEBUG
@@ -435,10 +430,9 @@
   }
 }
 
-/// IsChainDependent - Test if Outer is reachable from Inner through
+/// isChainDependent - Test if Outer is reachable from Inner through
 /// chain dependencies.
-static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
-                             unsigned NestLevel,
+static bool isChainDependent(SDNode *Outer, SDNode *Inner, unsigned NestLevel,
                              const TargetInstrInfo *TII) {
   SDNode *N = Outer;
   while (true) {
@@ -449,7 +443,7 @@
     // most nesting in order to ensure that we find the corresponding match.
     if (N->getOpcode() == ISD::TokenFactor) {
       for (const SDValue &Op : N->op_values())
-        if (IsChainDependent(Op.getNode(), Inner, NestLevel, TII))
+        if (isChainDependent(Op.getNode(), Inner, NestLevel, TII))
           return true;
       return false;
     }
@@ -476,7 +470,7 @@
   }
 }
 
-/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
+/// findCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
 /// the corresponding (lowered) CALLSEQ_BEGIN node.
 ///
-/// NestLevel and MaxNested are used in recursion to indcate the current level
+/// NestLevel and MaxNest are used in recursion to indicate the current level
@@ -485,9 +479,8 @@
 ///
 /// TODO: It would be better to give CALLSEQ_END an explicit operand to point
 /// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
-static SDNode *
-FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
-                 const TargetInstrInfo *TII) {
+static SDNode *findCallSeqStart(SDNode *N, unsigned &NestLevel,
+                                unsigned &MaxNest, const TargetInstrInfo *TII) {
   while (true) {
     // For a TokenFactor, examine each operand. There may be multiple ways
     // to get to the CALLSEQ_BEGIN, but we need to find the path with the
@@ -498,8 +491,8 @@
       for (const SDValue &Op : N->op_values()) {
         unsigned MyNestLevel = NestLevel;
         unsigned MyMaxNest = MaxNest;
-        if (SDNode *New = FindCallSeqStart(Op.getNode(),
-                                           MyNestLevel, MyMaxNest, TII))
+        if (SDNode *New =
+                findCallSeqStart(Op.getNode(), MyNestLevel, MyMaxNest, TII))
           if (!Best || (MyMaxNest > BestMaxNest)) {
             Best = New;
             BestMaxNest = MyMaxNest;
@@ -534,7 +527,7 @@
   }
 }
 
-/// Call ReleasePred for each predecessor, then update register live def/gen.
+/// Call releasePred for each predecessor, then update register live def/gen.
 /// Always update LiveRegDefs for a register dependence even if the current SU
-/// also defines the register. This effectively create one large live range
-/// across a sequence of two-address node. This is important because the
+/// also defines the register. This effectively creates one large live range
+/// across a sequence of two-address nodes. This is important because the
@@ -551,16 +544,17 @@
 ///
 /// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
 /// interference on flags.
-void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
+void ScheduleDAGRRList::releasePredecessors(SUnit *SU) {
   // Bottom up: release predecessors
   for (SDep &Pred : SU->Preds) {
-    ReleasePred(SU, &Pred);
+    releasePred(SU, &Pred);
     if (Pred.isAssignedRegDep()) {
-      // This is a physical register dependency and it's impossible or
+      // This is a physical register dependency, and it's impossible or
       // expensive to copy the register. Make sure nothing that can
       // clobber the register is scheduled between the predecessor and
       // this node.
-      SUnit *RegDef = LiveRegDefs[Pred.getReg()]; (void)RegDef;
+      SUnit *RegDef = LiveRegDefs[Pred.getReg()];
+      (void)RegDef;
       assert((!RegDef || RegDef == SU || RegDef == Pred.getSUnit()) &&
              "interference on register dependence");
       LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
@@ -581,7 +575,7 @@
           Node->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) {
         unsigned NestLevel = 0;
         unsigned MaxNest = 0;
-        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);
+        SDNode *N = findCallSeqStart(Node, NestLevel, MaxNest, TII);
         assert(N && "Must find call sequence start");
 
         SUnit *Def = &SUnits[N->getNodeId()];
@@ -596,7 +590,7 @@
 
 /// Check to see if any of the pending instructions are ready to issue.  If
 /// so, add them to the available queue.
-void ScheduleDAGRRList::ReleasePending() {
+void ScheduleDAGRRList::releasePending() {
   if (DisableSchedCycles) {
     assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
     return;
@@ -608,25 +602,26 @@
 
   // Check to see if any of the pending instructions are ready to issue.  If
   // so, add them to the available queue.
-  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
-    unsigned ReadyCycle = PendingQueue[i]->getHeight();
+  for (unsigned I = 0, E = PendingQueue.size(); I != E; ++I) {
+    unsigned ReadyCycle = PendingQueue[I]->getHeight();
     if (ReadyCycle < MinAvailableCycle)
       MinAvailableCycle = ReadyCycle;
 
-    if (PendingQueue[i]->isAvailable) {
-      if (!isReady(PendingQueue[i]))
-          continue;
-      AvailableQueue->push(PendingQueue[i]);
+    if (PendingQueue[I]->isAvailable) {
+      if (!isReady(PendingQueue[I]))
+        continue;
+      AvailableQueue->push(PendingQueue[I]);
     }
-    PendingQueue[i]->isPending = false;
-    PendingQueue[i] = PendingQueue.back();
+    PendingQueue[I]->isPending = false;
+    PendingQueue[I] = PendingQueue.back();
     PendingQueue.pop_back();
-    --i; --e;
+    --I;
+    --E;
   }
 }
 
 /// Move the scheduler state forward by the specified number of Cycles.
-void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
+void ScheduleDAGRRList::advanceToCycle(unsigned NextCycle) {
   if (NextCycle <= CurCycle)
     return;
 
@@ -635,20 +630,19 @@
   if (!HazardRec->isEnabled()) {
     // Bypass lots of virtual calls in case of long latency.
     CurCycle = NextCycle;
-  }
-  else {
+  } else {
     for (; CurCycle != NextCycle; ++CurCycle) {
       HazardRec->RecedeCycle();
     }
   }
   // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
   // available Q to release pending nodes at least once before popping.
-  ReleasePending();
+  releasePending();
 }
 
 /// Move the scheduler state forward until the specified node's dependents are
 /// ready and can be scheduled with no resource conflicts.
-void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
+void ScheduleDAGRRList::advancePastStalls(SUnit *SU) {
   if (DisableSchedCycles)
     return;
 
@@ -665,7 +659,7 @@
   // available instructions may be hidden by the stall (not a full pipe stall).
   // This updates the hazard recognizer's cycle before reserving resources for
   // this instruction.
-  AdvanceToCycle(ReadyCycle);
+  advanceToCycle(ReadyCycle);
 
   // Calls are scheduled in their preceding cycle, so don't conflict with
-  // hazards from instructions after the call. EmitNode will reset the
+  // hazards from instructions after the call. emitNode will reset the
@@ -678,19 +672,19 @@
   int Stalls = 0;
   while (true) {
     ScheduleHazardRecognizer::HazardType HT =
-      HazardRec->getHazardType(SU, -Stalls);
+        HazardRec->getHazardType(SU, -Stalls);
 
     if (HT == ScheduleHazardRecognizer::NoHazard)
       break;
 
     ++Stalls;
   }
-  AdvanceToCycle(CurCycle + Stalls);
+  advanceToCycle(CurCycle + Stalls);
 }
 
 /// Record this SUnit in the HazardRecognizer.
 /// Does not update CurCycle.
-void ScheduleDAGRRList::EmitNode(SUnit *SU) {
+void ScheduleDAGRRList::emitNode(SUnit *SU) {
   if (!HazardRec->isEnabled())
     return;
 
@@ -730,10 +724,10 @@
 
 static void resetVRegCycle(SUnit *SU);
 
-/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
+/// scheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
 /// count of its predecessors. If a predecessor pending count is zero, add it to
 /// the Available queue.
-void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
+void ScheduleDAGRRList::scheduleNodeBottomUp(SUnit *SU) {
   LLVM_DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
   LLVM_DEBUG(dumpNode(*SU));
 
@@ -750,7 +744,7 @@
   SU->setHeightToAtLeast(CurCycle);
 
   // Reserve resources for the scheduled instruction.
-  EmitNode(SU);
+  emitNode(SU);
 
   Sequence.push_back(SU);
 
@@ -760,14 +754,14 @@
-  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
+  // advance CurCycle before releasePredecessors to avoid useless pushes to
   // PendingQueue for schedulers that implement HasReadyFilter.
   if (!HazardRec->isEnabled() && AvgIPC < 2)
-    AdvanceToCycle(CurCycle + 1);
+    advanceToCycle(CurCycle + 1);
 
   // Update liveness of predecessors before successors to avoid treating a
   // two-address node as a live range def.
-  ReleasePredecessors(SU);
+  releasePredecessors(SU);
 
   // Release all the implicit physical register defs that are live.
-  for (SDep &Succ : SU->Succs) {
+  for (const SDep &Succ : SU->Succs) {
-    // LiveRegDegs[Succ.getReg()] != SU when SU is a two-address node.
+    // LiveRegDefs[Succ.getReg()] != SU when SU is a two-address node.
     if (Succ.isAssignedRegDep() && LiveRegDefs[Succ.getReg()] == SU) {
       assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
@@ -808,16 +802,16 @@
   if (HazardRec->isEnabled() || AvgIPC > 1) {
     if (SU->getNode() && SU->getNode()->isMachineOpcode())
       ++IssueCount;
-    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
-        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
-      AdvanceToCycle(CurCycle + 1);
+    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit()) ||
+        (!HazardRec->isEnabled() && IssueCount == AvgIPC))
+      advanceToCycle(CurCycle + 1);
   }
 }
 
-/// CapturePred - This does the opposite of ReleasePred. Since SU is being
+/// capturePred - This does the opposite of releasePred. Since SU is being
 /// unscheduled, increase the succ left count of its predecessors. Remove
 /// them from AvailableQueue if necessary.
-void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
+void ScheduleDAGRRList::capturePred(SDep *PredEdge) {
   SUnit *PredSU = PredEdge->getSUnit();
   if (PredSU->isAvailable) {
     PredSU->isAvailable = false;
@@ -830,15 +824,15 @@
   ++PredSU->NumSuccsLeft;
 }
 
-/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
+/// unscheduleNodeBottomUp - Remove the node from the schedule, update its and
 /// its predecessor states to reflect the change.
-void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
+void ScheduleDAGRRList::unscheduleNodeBottomUp(SUnit *SU) {
   LLVM_DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
   LLVM_DEBUG(dumpNode(*SU));
 
   for (SDep &Pred : SU->Preds) {
-    CapturePred(&Pred);
-    if (Pred.isAssignedRegDep() && SU == LiveRegGens[Pred.getReg()]){
+    capturePred(&Pred);
+    if (Pred.isAssignedRegDep() && SU == LiveRegGens[Pred.getReg()]) {
       assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
       assert(LiveRegDefs[Pred.getReg()] == Pred.getSUnit() &&
              "Physical register dependency violated?");
@@ -883,7 +877,7 @@
       }
     }
 
-  for (auto &Succ : SU->Succs) {
+  for (const auto &Succ : SU->Succs) {
     if (Succ.isAssignedRegDep()) {
       auto Reg = Succ.getReg();
       if (!LiveRegDefs[Reg])
@@ -915,8 +909,7 @@
     // Don't make available until backtracking is complete.
     SU->isPending = true;
     PendingQueue.push_back(SU);
-  }
-  else {
+  } else {
     AvailableQueue->push(SU);
   }
   AvailableQueue->unscheduledNode(SU);
@@ -924,11 +917,11 @@
 
 /// After backtracking, the hazard checker needs to be restored to a state
-/// corresponding the current cycle.
+/// corresponding to the current cycle.
-void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
+void ScheduleDAGRRList::restoreHazardCheckerBottomUp() {
   HazardRec->Reset();
 
-  unsigned LookAhead = std::min((unsigned)Sequence.size(),
-                                HazardRec->getMaxLookAhead());
+  unsigned LookAhead =
+      std::min((unsigned)Sequence.size(), HazardRec->getMaxLookAhead());
   if (LookAhead == 0)
     return;
 
@@ -939,19 +932,19 @@
     for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
       HazardRec->RecedeCycle();
     }
-    EmitNode(SU);
+    emitNode(SU);
   }
 }
 
-/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
+/// backtrackBottomUp - Backtrack scheduling to the cycle of the given BtSU in
-/// BTCycle in order to schedule a specific node.
+/// order to schedule a specific node.
-void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
+void ScheduleDAGRRList::backtrackBottomUp(SUnit *SU, SUnit *BtSU) {
   SUnit *OldSU = Sequence.back();
   while (true) {
     Sequence.pop_back();
     // FIXME: use ready cycle instead of height
     CurCycle = OldSU->getHeight();
-    UnscheduleNodeBottomUp(OldSU);
+    unscheduleNodeBottomUp(OldSU);
     AvailableQueue->setCurCycle(CurCycle);
     if (OldSU == BtSU)
       break;
@@ -960,9 +953,9 @@
 
   assert(!SU->isSucc(OldSU) && "Something is wrong!");
 
-  RestoreHazardCheckerBottomUp();
+  restoreHazardCheckerBottomUp();
 
-  ReleasePending();
+  releasePending();
 
   ++NumBacktracks;
 }
@@ -976,8 +969,8 @@
   return false;
 }
 
-/// TryUnfold - Attempt to unfold
-SUnit *ScheduleDAGRRList::TryUnfoldSU(SUnit *SU) {
+/// tryUnfoldSU - Attempt to unfold SU.
+SUnit *ScheduleDAGRRList::tryUnfoldSU(SUnit *SU) {
   SDNode *N = SU->getNode();
   // Use while over if to ease fall through.
   SmallVector<SDNode *, 2> NewNodes;
@@ -997,9 +990,9 @@
   unsigned OldNumVals = SU->getNode()->getNumValues();
 
   // LoadNode may already exist. This can happen when there is another
-  // load from the same location and producing the same type of value
+  // load from the same location and producing the same type of value,
   // but it has different alignment or volatileness.
-  bool isNewLoad = true;
+  bool NewLoad = true;
   SUnit *LoadSU;
   if (LoadNode->getNodeId() != -1) {
     LoadSU = &SUnits[LoadNode->getNodeId()];
@@ -1007,16 +1000,16 @@
     // this would negate the benefit to unfolding so just return SU.
     if (LoadSU->isScheduled)
       return SU;
-    isNewLoad = false;
+    NewLoad = false;
   } else {
-    LoadSU = CreateNewSUnit(LoadNode);
+    LoadSU = createNewSUnit(LoadNode);
     LoadNode->setNodeId(LoadSU->NodeNum);
 
     InitNumRegDefsLeft(LoadSU);
     computeLatency(LoadSU);
   }
 
-  bool isNewN = true;
+  bool NewN = true;
   SUnit *NewSU;
-    // This can only happen when isNewLoad is false.
+    // This can only happen when NewLoad is false.
   if (N->getNodeId() != -1) {
@@ -1026,14 +1019,14 @@
     if (NewSU->isScheduled) {
       return SU;
     }
-    isNewN = false;
+    NewN = false;
   } else {
-    NewSU = CreateNewSUnit(N);
+    NewSU = createNewSUnit(N);
     N->setNodeId(NewSU->NodeNum);
 
     const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
-    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
-      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
+    for (unsigned I = 0; I != MCID.getNumOperands(); ++I) {
+      if (MCID.getOperandConstraint(I, MCOI::TIED_TO) != -1) {
         NewSU->isTwoAddress = true;
         break;
       }
@@ -1048,8 +1041,8 @@
   LLVM_DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
 
   // Now that we are committed to unfolding replace DAG Uses.
-  for (unsigned i = 0; i != NumVals; ++i)
-    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
+  for (unsigned I = 0; I != NumVals; ++I)
+    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), I), SDValue(N, I));
   DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals - 1),
                                  SDValue(LoadNode, 1));
 
@@ -1059,7 +1052,7 @@
   SmallVector<SDep, 4> LoadPreds;
   SmallVector<SDep, 4> NodePreds;
   SmallVector<SDep, 4> NodeSuccs;
-  for (SDep &Pred : SU->Preds) {
+  for (const SDep &Pred : SU->Preds) {
     if (Pred.isCtrl())
       ChainPreds.push_back(Pred);
     else if (isOperandOf(Pred.getSUnit(), LoadNode))
@@ -1067,7 +1060,7 @@
     else
       NodePreds.push_back(Pred);
   }
-  for (SDep &Succ : SU->Succs) {
+  for (const SDep &Succ : SU->Succs) {
     if (Succ.isCtrl())
       ChainSuccs.push_back(Succ);
     else
@@ -1076,37 +1069,37 @@
 
   // Now assign edges to the newly-created nodes.
   for (const SDep &Pred : ChainPreds) {
-    RemovePred(SU, Pred);
-    if (isNewLoad)
-      AddPredQueued(LoadSU, Pred);
+    removePred(SU, Pred);
+    if (NewLoad)
+      addPredQueued(LoadSU, Pred);
   }
   for (const SDep &Pred : LoadPreds) {
-    RemovePred(SU, Pred);
-    if (isNewLoad)
-      AddPredQueued(LoadSU, Pred);
+    removePred(SU, Pred);
+    if (NewLoad)
+      addPredQueued(LoadSU, Pred);
   }
   for (const SDep &Pred : NodePreds) {
-    RemovePred(SU, Pred);
-    AddPredQueued(NewSU, Pred);
+    removePred(SU, Pred);
+    addPredQueued(NewSU, Pred);
   }
-  for (SDep D : NodeSuccs) {
+  for (SDep &D : NodeSuccs) {
     SUnit *SuccDep = D.getSUnit();
     D.setSUnit(SU);
-    RemovePred(SuccDep, D);
+    removePred(SuccDep, D);
     D.setSUnit(NewSU);
-    AddPredQueued(SuccDep, D);
+    addPredQueued(SuccDep, D);
     // Balance register pressure.
     if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled &&
         !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
       --NewSU->NumRegDefsLeft;
   }
-  for (SDep D : ChainSuccs) {
+  for (SDep &D : ChainSuccs) {
     SUnit *SuccDep = D.getSUnit();
     D.setSUnit(SU);
-    RemovePred(SuccDep, D);
-    if (isNewLoad) {
+    removePred(SuccDep, D);
+    if (NewLoad) {
       D.setSUnit(LoadSU);
-      AddPredQueued(SuccDep, D);
+      addPredQueued(SuccDep, D);
     }
   }
 
@@ -1114,11 +1107,11 @@
   // by LoadSU.
   SDep D(LoadSU, SDep::Data, 0);
   D.setLatency(LoadSU->Latency);
-  AddPredQueued(NewSU, D);
+  addPredQueued(NewSU, D);
 
-  if (isNewLoad)
+  if (NewLoad)
     AvailableQueue->addNode(LoadSU);
-  if (isNewN)
+  if (NewN)
     AvailableQueue->addNode(NewSU);
 
   ++NumUnfolds;
@@ -1129,9 +1122,9 @@
   return NewSU;
 }
 
-/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
+/// copyAndMoveSuccessors - Clone the specified node and move its scheduled
 /// successors to the newly created node.
-SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
+SUnit *ScheduleDAGRRList::copyAndMoveSuccessors(SUnit *SU) {
   SDNode *N = SU->getNode();
   if (!N)
     return nullptr;
@@ -1139,23 +1132,22 @@
   LLVM_DEBUG(dbgs() << "Considering duplicating the SU\n");
   LLVM_DEBUG(dumpNode(*SU));
 
-  if (N->getGluedNode() &&
-      !TII->canCopyGluedNodeDuringSchedule(N)) {
+  if (N->getGluedNode() && !TII->canCopyGluedNodeDuringSchedule(N)) {
     LLVM_DEBUG(
-        dbgs()
-        << "Giving up because it has incoming glue and the target does not "
-           "want to copy it\n");
+        dbgs() << "Giving up because it has incoming glue and the target does"
+                  " not want to copy it\n");
     return nullptr;
   }
 
   SUnit *NewSU;
   bool TryUnfold = false;
-  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
-    MVT VT = N->getSimpleValueType(i);
+  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I) {
+    MVT VT = N->getSimpleValueType(I);
     if (VT == MVT::Glue) {
       LLVM_DEBUG(dbgs() << "Giving up because it has outgoing glue\n");
       return nullptr;
-    } else if (VT == MVT::Other)
+    }
+    if (VT == MVT::Other)
       TryUnfold = true;
   }
   for (const SDValue &Op : N->op_values()) {
@@ -1170,7 +1162,7 @@
 
   // If possible unfold instruction.
   if (TryUnfold) {
-    SUnit *UnfoldSU = TryUnfoldSU(SU);
+    SUnit *UnfoldSU = tryUnfoldSU(SU);
     if (!UnfoldSU)
       return nullptr;
     SU = UnfoldSU;
@@ -1181,34 +1173,34 @@
   }
 
   LLVM_DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
-  NewSU = CreateClone(SU);
+  NewSU = createClone(SU);
 
   // New SUnit has the exact same predecessors.
-  for (SDep &Pred : SU->Preds)
+  for (const SDep &Pred : SU->Preds)
     if (!Pred.isArtificial())
-      AddPredQueued(NewSU, Pred);
+      addPredQueued(NewSU, Pred);
 
   // Make sure the clone comes after the original. (InstrEmitter assumes
   // this ordering.)
-  AddPredQueued(NewSU, SDep(SU, SDep::Artificial));
+  addPredQueued(NewSU, SDep(SU, SDep::Artificial));
 
   // Only copy scheduled successors. Cut them from old node's successor
   // list and move them over.
   SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
-  for (SDep &Succ : SU->Succs) {
+  for (const SDep &Succ : SU->Succs) {
     if (Succ.isArtificial())
       continue;
     SUnit *SuccSU = Succ.getSUnit();
     if (SuccSU->isScheduled) {
       SDep D = Succ;
       D.setSUnit(NewSU);
-      AddPredQueued(SuccSU, D);
+      addPredQueued(SuccSU, D);
       D.setSUnit(SU);
-      DelDeps.push_back(std::make_pair(SuccSU, D));
+      DelDeps.emplace_back(SuccSU, D);
     }
   }
-  for (auto &DelDep : DelDeps)
-    RemovePred(DelDep.first, DelDep.second);
+  for (const auto &[DelSU, DelD] : DelDeps)
+    removePred(DelSU, DelD);
 
   AvailableQueue->updateNode(SU);
   AvailableQueue->addNode(NewSU);
@@ -1217,49 +1209,47 @@
   return NewSU;
 }
 
-/// InsertCopiesAndMoveSuccs - Insert register copies and move all
+/// insertCopiesAndMoveSuccs - Insert register copies and move all
 /// scheduled successors of the given SUnit to the last copy.
-void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
-                                              const TargetRegisterClass *DestRC,
-                                              const TargetRegisterClass *SrcRC,
-                                              SmallVectorImpl<SUnit*> &Copies) {
-  SUnit *CopyFromSU = CreateNewSUnit(nullptr);
+void ScheduleDAGRRList::insertCopiesAndMoveSuccs(
+    SUnit *SU, unsigned Reg, const TargetRegisterClass *DestRC,
+    const TargetRegisterClass *SrcRC, SmallVectorImpl<SUnit *> &Copies) {
+  SUnit *CopyFromSU = createNewSUnit(nullptr);
   CopyFromSU->CopySrcRC = SrcRC;
   CopyFromSU->CopyDstRC = DestRC;
 
-  SUnit *CopyToSU = CreateNewSUnit(nullptr);
+  SUnit *CopyToSU = createNewSUnit(nullptr);
   CopyToSU->CopySrcRC = DestRC;
   CopyToSU->CopyDstRC = SrcRC;
 
   // Only copy scheduled successors. Cut them from old node's successor
   // list and move them over.
   SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
-  for (SDep &Succ : SU->Succs) {
+  for (const SDep &Succ : SU->Succs) {
     if (Succ.isArtificial())
       continue;
     SUnit *SuccSU = Succ.getSUnit();
     if (SuccSU->isScheduled) {
       SDep D = Succ;
       D.setSUnit(CopyToSU);
-      AddPredQueued(SuccSU, D);
-      DelDeps.push_back(std::make_pair(SuccSU, Succ));
-    }
-    else {
-      // Avoid scheduling the def-side copy before other successors. Otherwise
+      addPredQueued(SuccSU, D);
+      DelDeps.emplace_back(SuccSU, Succ);
+    } else {
+      // Avoid scheduling the def-side copy before other successors. Otherwise,
       // we could introduce another physreg interference on the copy and
       // continue inserting copies indefinitely.
-      AddPredQueued(SuccSU, SDep(CopyFromSU, SDep::Artificial));
+      addPredQueued(SuccSU, SDep(CopyFromSU, SDep::Artificial));
     }
   }
-  for (auto &DelDep : DelDeps)
-    RemovePred(DelDep.first, DelDep.second);
+  for (const auto &[DelSU, DelD] : DelDeps)
+    removePred(DelSU, DelD);
 
   SDep FromDep(SU, SDep::Data, Reg);
   FromDep.setLatency(SU->Latency);
-  AddPredQueued(CopyFromSU, FromDep);
+  addPredQueued(CopyFromSU, FromDep);
   SDep ToDep(CopyFromSU, SDep::Data, 0);
   ToDep.setLatency(CopyFromSU->Latency);
-  AddPredQueued(CopyToSU, ToDep);
+  addPredQueued(CopyToSU, ToDep);
 
   AvailableQueue->updateNode(SU);
   AvailableQueue->addNode(CopyFromSU);
@@ -1281,7 +1271,8 @@
     NumRes = 1;
   } else {
     const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
-    assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
+    assert(MCID.ImplicitDefs &&
+           "Physical reg def must be in implicit def list!");
     NumRes = MCID.getNumDefs();
     for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
       if (Reg == *ImpDef)
@@ -1292,9 +1283,9 @@
   return N->getSimpleValueType(NumRes);
 }
 
-/// CheckForLiveRegDef - Return true and update live register vector if the
+/// checkForLiveRegDef - Update the live register vector if the
 /// specified register def of the specified SUnit clobbers any "live" registers.
-static void CheckForLiveRegDef(SUnit *SU, unsigned Reg, SUnit **LiveRegDefs,
+static void checkForLiveRegDef(SUnit *SU, unsigned Reg, SUnit **LiveRegDefs,
                                SmallSet<unsigned, 4> &RegAdded,
                                SmallVectorImpl<unsigned> &LRegs,
                                const TargetRegisterInfo *TRI,
@@ -1302,10 +1293,12 @@
   for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {
 
     // Check if Ref is live.
-    if (!LiveRegDefs[*AliasI]) continue;
+    if (!LiveRegDefs[*AliasI])
+      continue;
 
     // Allow multiple uses of the same def.
-    if (LiveRegDefs[*AliasI] == SU) continue;
+    if (LiveRegDefs[*AliasI] == SU)
+      continue;
 
-    // Allow multiple uses of same def
+    // Allow multiple uses of the same def.
     if (Node && LiveRegDefs[*AliasI]->getNode() == Node)
@@ -1318,19 +1311,22 @@
   }
 }
 
-/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
+/// checkForLiveRegDefMasked - Check for any live physregs that are clobbered
 /// by RegMask, and add them to LRegs.
-static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
-                                     ArrayRef<SUnit*> LiveRegDefs,
+static void checkForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
+                                     ArrayRef<SUnit *> LiveRegDefs,
                                      SmallSet<unsigned, 4> &RegAdded,
                                      SmallVectorImpl<unsigned> &LRegs) {
   // Look at all live registers. Skip Reg0 and the special CallResource.
-  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
-    if (!LiveRegDefs[i]) continue;
-    if (LiveRegDefs[i] == SU) continue;
-    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
-    if (RegAdded.insert(i).second)
-      LRegs.push_back(i);
+  for (unsigned I = 1, E = LiveRegDefs.size() - 1; I != E; ++I) {
+    if (!LiveRegDefs[I])
+      continue;
+    if (LiveRegDefs[I] == SU)
+      continue;
+    if (!MachineOperand::clobbersPhysReg(RegMask, I))
+      continue;
+    if (RegAdded.insert(I).second)
+      LRegs.push_back(I);
   }
 }
 
@@ -1342,12 +1338,12 @@
   return nullptr;
 }
 
-/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
+/// delayForLiveRegsBottomUp - Returns true if it is necessary to delay
 /// scheduling of the given node to satisfy live physical register dependencies.
 /// If the specific node is the last one that's available to schedule, do
 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
-bool ScheduleDAGRRList::
-DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
+bool ScheduleDAGRRList::delayForLiveRegsBottomUp(
+    SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
   if (NumLiveRegs == 0)
     return false;
 
@@ -1356,9 +1352,9 @@
   //
   // If SU is the currently live definition of the same register that it uses,
   // then we are free to schedule it.
-  for (SDep &Pred : SU->Preds) {
+  for (const SDep &Pred : SU->Preds) {
     if (Pred.isAssignedRegDep() && LiveRegDefs[Pred.getReg()] != SU)
-      CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs.get(),
+      checkForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs.get(),
                          RegAdded, LRegs, TRI);
   }
 
@@ -1367,26 +1363,27 @@
         Node->getOpcode() == ISD::INLINEASM_BR) {
       // Inline asm can clobber physical defs.
       unsigned NumOps = Node->getNumOperands();
-      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
-        --NumOps;  // Ignore the glue operand.
+      if (Node->getOperand(NumOps - 1).getValueType() == MVT::Glue)
+        --NumOps; // Ignore the glue operand.
 
-      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
+      for (unsigned I = InlineAsm::Op_FirstOperand; I != NumOps;) {
         unsigned Flags =
-          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+            cast<ConstantSDNode>(Node->getOperand(I))->getZExtValue();
         unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
 
-        ++i; // Skip the ID value.
+        ++I; // Skip the ID value.
         if (InlineAsm::isRegDefKind(Flags) ||
             InlineAsm::isRegDefEarlyClobberKind(Flags) ||
             InlineAsm::isClobberKind(Flags)) {
           // Check for def of register or earlyclobber register.
-          for (; NumVals; --NumVals, ++i) {
-            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
+          for (; NumVals; --NumVals, ++I) {
+            Register Reg = cast<RegisterSDNode>(Node->getOperand(I))->getReg();
             if (Register::isPhysicalRegister(Reg))
-              CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
+              checkForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs,
+                                 TRI);
           }
         } else
-          i += NumVals;
+          I += NumVals;
       }
       continue;
     }
@@ -1395,7 +1392,7 @@
       Register Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
       if (Reg.isPhysical()) {
         SDNode *SrcNode = Node->getOperand(2).getNode();
-        CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI,
+        checkForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI,
                            SrcNode);
       }
     }
@@ -1412,15 +1409,15 @@
         SDNode *Gen = LiveRegGens[CallResource]->getNode();
         while (SDNode *Glued = Gen->getGluedNode())
           Gen = Glued;
-        if (!IsChainDependent(Gen, Node, 0, TII) &&
+        if (!isChainDependent(Gen, Node, 0, TII) &&
             RegAdded.insert(CallResource).second)
           LRegs.push_back(CallResource);
       }
     }
     if (const uint32_t *RegMask = getNodeRegMask(Node))
-      CheckForLiveRegDefMasked(SU, RegMask,
-                               makeArrayRef(LiveRegDefs.get(), TRI->getNumRegs()),
-                               RegAdded, LRegs);
+      checkForLiveRegDefMasked(
+          SU, RegMask, makeArrayRef(LiveRegDefs.get(), TRI->getNumRegs()),
+          RegAdded, LRegs);
 
     const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
     if (MCID.hasOptionalDef()) {
@@ -1428,17 +1425,18 @@
       // This operand can be either a def of CPSR, if the S bit is set; or a use
       // of %noreg.  When the OptionalDef is set to a valid register, we need to
       // handle it in the same way as an ImplicitDef.
-      for (unsigned i = 0; i < MCID.getNumDefs(); ++i)
-        if (MCID.OpInfo[i].isOptionalDef()) {
-          const SDValue &OptionalDef = Node->getOperand(i - Node->getNumValues());
-          unsigned Reg = cast<RegisterSDNode>(OptionalDef)->getReg();
-          CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
+      for (unsigned I = 0; I < MCID.getNumDefs(); ++I)
+        if (MCID.OpInfo[I].isOptionalDef()) {
+          const SDValue &OptionalDef =
+              Node->getOperand(I - Node->getNumValues());
+          Register Reg = cast<RegisterSDNode>(OptionalDef)->getReg();
+          checkForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
         }
     }
     if (!MCID.ImplicitDefs)
       continue;
     for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
-      CheckForLiveRegDef(SU, *Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
+      checkForLiveRegDef(SU, *Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
   }
 
   return !LRegs.empty();
@@ -1446,8 +1444,8 @@
 
 void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
   // Add the nodes that aren't ready back onto the available list.
-  for (unsigned i = Interferences.size(); i > 0; --i) {
-    SUnit *SU = Interferences[i-1];
+  for (unsigned I = Interferences.size(); I > 0; --I) {
+    SUnit *SU = Interferences[I - 1];
     LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
     if (Reg) {
       SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
@@ -1462,8 +1460,8 @@
       LLVM_DEBUG(dbgs() << "    Repushing SU #" << SU->NodeNum << '\n');
       AvailableQueue->push(SU);
     }
-    if (i < Interferences.size())
-      Interferences[i-1] = Interferences.back();
+    if (I < Interferences.size())
+      Interferences[I - 1] = Interferences.back();
     Interferences.pop_back();
     LRegsMap.erase(LRegsPos);
   }
@@ -1473,27 +1471,25 @@
 /// (1) Ready: latency has been satisfied
 /// (2) No Hazards: resources are available
 /// (3) No Interferences: may unschedule to break register interferences.
-SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
+SUnit *ScheduleDAGRRList::pickNodeToScheduleBottomUp() {
   SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();
   auto FindAvailableNode = [&]() {
     while (CurSU) {
       SmallVector<unsigned, 4> LRegs;
-      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
+      if (!delayForLiveRegsBottomUp(CurSU, LRegs))
         break;
       LLVM_DEBUG(dbgs() << "    Interfering reg ";
                  if (LRegs[0] == TRI->getNumRegs()) dbgs() << "CallResource";
                  else dbgs() << printReg(LRegs[0], TRI);
                  dbgs() << " SU #" << CurSU->NodeNum << '\n');
-      std::pair<LRegsMapT::iterator, bool> LRegsPair =
-        LRegsMap.insert(std::make_pair(CurSU, LRegs));
-      if (LRegsPair.second) {
-        CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
+      auto [LRegsIter, LRegsInserted] = LRegsMap.try_emplace(CurSU, LRegs);
+      if (LRegsInserted) {
+        CurSU->isPending = true; // This SU is not in AvailableQueue right now.
         Interferences.push_back(CurSU);
-      }
-      else {
+      } else {
         assert(CurSU->isPending && "Interferences are pending");
         // Update the interference with current live regs.
-        LRegsPair.first->second = LRegs;
+        LRegsIter->second = LRegs;
       }
       CurSU = AvailableQueue->pop();
     }
@@ -1523,9 +1519,9 @@
         LiveCycle = BtSU->getHeight();
       }
     }
-    if (!WillCreateCycle(TrySU, BtSU))  {
+    if (!willCreateCycle(TrySU, BtSU)) {
-      // BacktrackBottomUp mutates Interferences!
+      // backtrackBottomUp mutates Interferences!
-      BacktrackBottomUp(TrySU, BtSU);
+      backtrackBottomUp(TrySU, BtSU);
 
       // Force the current node to be scheduled before the node that
       // requires the physical reg dep.
@@ -1536,7 +1532,7 @@
       }
       LLVM_DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum
                         << ") to SU(" << TrySU->NodeNum << ")\n");
-      AddPredQueued(TrySU, SDep(BtSU, SDep::Artificial));
+      addPredQueued(TrySU, SDep(BtSU, SDep::Artificial));
 
       // If one or more successors has been unscheduled, then the current
       // node is no longer available.
@@ -1567,37 +1563,36 @@
     unsigned Reg = LRegs[0];
     SUnit *LRDef = LiveRegDefs[Reg];
     MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
-    const TargetRegisterClass *RC =
-      TRI->getMinimalPhysRegClass(Reg, VT);
+    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
     const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
 
     // If cross copy register class is the same as RC, then it must be possible
-    // copy the value directly. Do not try duplicate the def.
+    // to copy the value directly. Do not try to duplicate the def.
     // If cross copy register class is not the same as RC, then it's possible to
-    // copy the value but it require cross register class copies and it is
+    // copy the value, but it requires cross register class copies, and it is
     // expensive.
     // If cross copy register class is null, then it's not possible to copy
     // the value at all.
     SUnit *NewDef = nullptr;
     if (DestRC != RC) {
-      NewDef = CopyAndMoveSuccessors(LRDef);
+      NewDef = copyAndMoveSuccessors(LRDef);
       if (!DestRC && !NewDef)
         report_fatal_error("Can't handle live physical register dependency!");
     }
     if (!NewDef) {
       // Issue copies, these can be expensive cross register class copies.
-      SmallVector<SUnit*, 2> Copies;
-      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
+      SmallVector<SUnit *, 2> Copies;
+      insertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
       LLVM_DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
                         << " to SU #" << Copies.front()->NodeNum << "\n");
-      AddPredQueued(TrySU, SDep(Copies.front(), SDep::Artificial));
+      addPredQueued(TrySU, SDep(Copies.front(), SDep::Artificial));
       NewDef = Copies.back();
     }
 
     LLVM_DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
                       << " to SU #" << TrySU->NodeNum << "\n");
     LiveRegDefs[Reg] = NewDef;
-    AddPredQueued(NewDef, SDep(TrySU, SDep::Artificial));
+    addPredQueued(NewDef, SDep(TrySU, SDep::Artificial));
     TrySU->isAvailable = false;
     CurSU = NewDef;
   }
@@ -1605,11 +1600,11 @@
   return CurSU;
 }
 
-/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
+/// listScheduleBottomUp - The main loop of list scheduling for bottom-up
 /// schedulers.
-void ScheduleDAGRRList::ListScheduleBottomUp() {
+void ScheduleDAGRRList::listScheduleBottomUp() {
   // Release any predecessors of the special Exit node.
-  ReleasePredecessors(&ExitSU);
+  releasePredecessors(&ExitSU);
 
   // Add root to Available queue.
   if (!SUnits.empty()) {
@@ -1628,17 +1623,17 @@
 
     // Pick the best node to schedule taking all constraints into
     // consideration.
-    SUnit *SU = PickNodeToScheduleBottomUp();
+    SUnit *SU = pickNodeToScheduleBottomUp();
 
-    AdvancePastStalls(SU);
+    advancePastStalls(SU);
 
-    ScheduleNodeBottomUp(SU);
+    scheduleNodeBottomUp(SU);
 
     while (AvailableQueue->empty() && !PendingQueue.empty()) {
       // Advance the cycle to free resources. Skip ahead to the next ready SU.
       assert(MinAvailableCycle < std::numeric_limits<unsigned>::max() &&
              "MinAvailableCycle uninitialized");
-      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
+      advanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
     }
   }
 
@@ -1655,20 +1650,19 @@
 class RegReductionPQBase;
 
 struct queue_sort {
-  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
+  bool isReady(SUnit *SU, unsigned CurCycle) const { return true; }
 };
 
 #ifndef NDEBUG
-template<class SF>
-struct reverse_sort : public queue_sort {
+template <class SF> struct reverse_sort : public queue_sort {
   SF &SortFunc;
 
-  reverse_sort(SF &sf) : SortFunc(sf) {}
+  reverse_sort(SF &SortFunc) : SortFunc(SortFunc) {}
 
-  bool operator()(SUnit* left, SUnit* right) const {
-    // reverse left/right rather than simply !SortFunc(left, right)
+  bool operator()(SUnit *Left, SUnit *Right) const {
+    // reverse left/right rather than simply !SortFunc(Left, Right)
     // to expose different paths in the comparison logic.
-    return SortFunc(right, left);
+    return SortFunc(Right, Left);
   }
 };
 #endif // NDEBUG
@@ -1676,63 +1670,51 @@
 /// bu_ls_rr_sort - Priority function for bottom up register pressure
 // reduction scheduler.
 struct bu_ls_rr_sort : public queue_sort {
-  enum {
-    IsBottomUp = true,
-    HasReadyFilter = false
-  };
+  enum { IsBottomUp = true, HasReadyFilter = false };
 
   RegReductionPQBase *SPQ;
 
-  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
+  bu_ls_rr_sort(RegReductionPQBase *SPQ) : SPQ(SPQ) {}
 
-  bool operator()(SUnit* left, SUnit* right) const;
+  bool operator()(SUnit *Left, SUnit *Right) const;
 };
 
 // src_ls_rr_sort - Priority function for source order scheduler.
 struct src_ls_rr_sort : public queue_sort {
-  enum {
-    IsBottomUp = true,
-    HasReadyFilter = false
-  };
+  enum { IsBottomUp = true, HasReadyFilter = false };
 
   RegReductionPQBase *SPQ;
 
-  src_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
+  src_ls_rr_sort(RegReductionPQBase *SPQ) : SPQ(SPQ) {}
 
-  bool operator()(SUnit* left, SUnit* right) const;
+  bool operator()(SUnit *Left, SUnit *Right) const;
 };
 
 // hybrid_ls_rr_sort - Priority function for hybrid scheduler.
 struct hybrid_ls_rr_sort : public queue_sort {
-  enum {
-    IsBottomUp = true,
-    HasReadyFilter = false
-  };
+  enum { IsBottomUp = true, HasReadyFilter = false };
 
   RegReductionPQBase *SPQ;
 
-  hybrid_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
+  hybrid_ls_rr_sort(RegReductionPQBase *SPQ) : SPQ(SPQ) {}
 
   bool isReady(SUnit *SU, unsigned CurCycle) const;
 
-  bool operator()(SUnit* left, SUnit* right) const;
+  bool operator()(SUnit *Left, SUnit *Right) const;
 };
 
 // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
 // scheduler.
 struct ilp_ls_rr_sort : public queue_sort {
-  enum {
-    IsBottomUp = true,
-    HasReadyFilter = false
-  };
+  enum { IsBottomUp = true, HasReadyFilter = false };
 
   RegReductionPQBase *SPQ;
 
-  ilp_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
+  ilp_ls_rr_sort(RegReductionPQBase *SPQ) : SPQ(SPQ) {}
 
   bool isReady(SUnit *SU, unsigned CurCycle) const;
 
-  bool operator()(SUnit* left, SUnit* right) const;
+  bool operator()(SUnit *Left, SUnit *Right) const;
 };
 
 class RegReductionPQBase : public SchedulingPriorityQueue {
@@ -1749,9 +1731,9 @@
   const TargetInstrInfo *TII;
   const TargetRegisterInfo *TRI;
   const TargetLowering *TLI;
-  ScheduleDAGRRList *scheduleDAG = nullptr;
+  ScheduleDAGRRList *ScheduleDAG = nullptr;
 
-  // SethiUllmanNumbers - The SethiUllman number for each node.
+  // SethiUllmanNumbers - The Sethi-Ullman number for each node.
   std::vector<unsigned> SethiUllmanNumbers;
 
   /// RegPressure - Tracking current reg pressure per register class.
@@ -1762,15 +1744,13 @@
   std::vector<unsigned> RegLimit;
 
 public:
-  RegReductionPQBase(MachineFunction &mf,
-                     bool hasReadyFilter,
-                     bool tracksrp,
-                     bool srcorder,
-                     const TargetInstrInfo *tii,
-                     const TargetRegisterInfo *tri,
-                     const TargetLowering *tli)
-    : SchedulingPriorityQueue(hasReadyFilter), TracksRegPressure(tracksrp),
-      SrcOrder(srcorder), MF(mf), TII(tii), TRI(tri), TLI(tli) {
+  RegReductionPQBase(MachineFunction &MF, bool HasReadyFilter,
+                     bool TracksRegPressure, bool SrcOrder,
+                     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
+                     const TargetLowering *TLI)
+      : SchedulingPriorityQueue(HasReadyFilter),
+        TracksRegPressure(TracksRegPressure), SrcOrder(SrcOrder), MF(MF),
+        TII(TII), TRI(TRI), TLI(TLI) {
     if (TracksRegPressure) {
       unsigned NumRC = TRI->getNumRegClasses();
       RegLimit.resize(NumRC);
@@ -1778,19 +1758,19 @@
       std::fill(RegLimit.begin(), RegLimit.end(), 0);
       std::fill(RegPressure.begin(), RegPressure.end(), 0);
       for (const TargetRegisterClass *RC : TRI->regclasses())
-        RegLimit[RC->getID()] = tri->getRegPressureLimit(RC, MF);
+        RegLimit[RC->getID()] = TRI->getRegPressureLimit(RC, MF);
     }
   }
 
-  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
-    scheduleDAG = scheduleDag;
+  void setScheduleDAG(ScheduleDAGRRList *ScheduleDag) {
+    this->ScheduleDAG = ScheduleDag;
   }
 
-  ScheduleHazardRecognizer* getHazardRec() {
-    return scheduleDAG->getHazardRec();
+  ScheduleHazardRecognizer *getHazardRec() {
+    return ScheduleDAG->getHazardRec();
   }
 
-  void initNodes(std::vector<SUnit> &sunits) override;
+  void initNodes(std::vector<SUnit> &SUs) override;
 
   void addNode(const SUnit *SU) override;
 
@@ -1805,7 +1785,8 @@
   unsigned getNodePriority(const SUnit *SU) const;
 
   unsigned getNodeOrdering(const SUnit *SU) const {
-    if (!SU->getNode()) return 0;
+    if (!SU->getNode())
+      return 0;
 
     return SU->getNode()->getIROrder();
   }
@@ -1832,11 +1813,11 @@
 
   void dumpRegPressure() const;
 
-  bool HighRegPressure(const SUnit *SU) const;
+  bool highRegPressure(const SUnit *SU) const;
 
-  bool MayReduceRegPressure(SUnit *SU) const;
+  bool mayReduceRegPressure(const SUnit *SU) const;
 
-  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
+  int regPressureDiff(const SUnit *SU, unsigned &LiveUses) const;
 
   void scheduledNode(SUnit *SU) override;
 
@@ -1844,12 +1825,12 @@
 
 protected:
   bool canClobber(const SUnit *SU, const SUnit *Op);
-  void AddPseudoTwoAddrDeps();
-  void PrescheduleNodesWithMultipleUses();
-  void CalculateSethiUllmanNumbers();
+  void addPseudoTwoAddrDeps();
+  void preScheduleNodesWithMultipleUses();
+  void calculateSethiUllmanNumbers();
 };
 
-template<class SF>
+template <class SF>
 static SUnit *popFromQueueImpl(std::vector<SUnit *> &Q, SF &Picker) {
   unsigned BestIdx = 0;
   // Only compute the cost for the first 1000 items in the queue, to avoid
@@ -1865,7 +1846,7 @@
   return V;
 }
 
-template<class SF>
+template <class SF>
 SUnit *popFromQueue(std::vector<SUnit *> &Q, SF &Picker, ScheduleDAG *DAG) {
 #ifndef NDEBUG
   if (DAG->StressSched) {
@@ -1884,20 +1865,18 @@
 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
 // to reduce register pressure.
 //
-template<class SF>
+template <class SF>
 class RegReductionPriorityQueue : public RegReductionPQBase {
   SF Picker;
 
 public:
-  RegReductionPriorityQueue(MachineFunction &mf,
-                            bool tracksrp,
-                            bool srcorder,
-                            const TargetInstrInfo *tii,
-                            const TargetRegisterInfo *tri,
-                            const TargetLowering *tli)
-    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
-                         tii, tri, tli),
-      Picker(this) {}
+  RegReductionPriorityQueue(MachineFunction &MF, bool TracksRegPressure,
+                            bool SrcOrder, const TargetInstrInfo *TII,
+                            const TargetRegisterInfo *TRI,
+                            const TargetLowering *TLI)
+      : RegReductionPQBase(MF, SF::HasReadyFilter, TracksRegPressure, SrcOrder,
+                           TII, TRI, TLI),
+        Picker(this) {}
 
   bool isBottomUp() const override { return SF::IsBottomUp; }
 
@@ -1906,20 +1885,21 @@
   }
 
   SUnit *pop() override {
-    if (Queue.empty()) return nullptr;
+    if (Queue.empty())
+      return nullptr;
 
-    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
+    SUnit *V = popFromQueue(Queue, Picker, ScheduleDAG);
     V->NodeQueueId = 0;
     return V;
   }
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-  LLVM_DUMP_METHOD void dump(ScheduleDAG *DAG) const override {
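+  // Note: the parameter type is now written llvm::ScheduleDAG because the
+  // renamed ScheduleDAG member (inherited from RegReductionPQBase) would
+  // otherwise shadow the type name inside this class.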
+  LLVM_DUMP_METHOD void dump(llvm::ScheduleDAG *DAG) const override {
     // Emulate pop() without clobbering NodeQueueIds.
     std::vector<SUnit *> DumpQueue = Queue;
     SF DumpPicker = Picker;
     while (!DumpQueue.empty()) {
-      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
+      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, ScheduleDAG);
       dbgs() << "Height " << SU->getHeight() << ": ";
       DAG->dumpNode(*SU);
     }
@@ -1939,23 +1919,23 @@
 //===----------------------------------------------------------------------===//
 
 // Check for special nodes that bypass scheduling heuristics.
-// Currently this pushes TokenFactor nodes down, but may be used for other
+// Currently, this pushes TokenFactor nodes down, but may be used for other
 // pseudo-ops as well.
 //
 // Return -1 to schedule right above left, 1 for left above right.
 // Return 0 if no bias exists.
-static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
-  bool LSchedLow = left->isScheduleLow;
-  bool RSchedLow = right->isScheduleLow;
+static int checkSpecialNodes(const SUnit *Left, const SUnit *Right) {
+  bool LSchedLow = Left->isScheduleLow;
+  bool RSchedLow = Right->isScheduleLow;
   if (LSchedLow != RSchedLow)
     return LSchedLow < RSchedLow ? 1 : -1;
   return 0;
 }
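
// (Reading aid, not part of this patch: each of the four comparator
// operator() implementations below uses this as a pre-filter, in the form
//   if (int Res = checkSpecialNodes(Left, Right))
//     return Res > 0;
// before applying its own heuristics.)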
 
-/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
+/// calcNodeSethiUllmanNumber - Compute the Sethi-Ullman number.
 /// A smaller number means a higher priority.
-static unsigned
-CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
+static unsigned calcNodeSethiUllmanNumber(const SUnit *SU,
+                                          std::vector<unsigned> &SUNumbers) {
   if (SUNumbers[SU->NodeNum] != 0)
     return SUNumbers[SU->NodeNum];
 
@@ -1975,12 +1955,13 @@
     // Try to find a non-evaluated pred and push it into the processing stack.
     for (unsigned P = Temp.PredsProcessed; P < TempSU->Preds.size(); ++P) {
       auto &Pred = TempSU->Preds[P];
-      if (Pred.isCtrl()) continue;  // ignore chain preds
+      if (Pred.isCtrl())
+        continue; // ignore chain preds
       SUnit *PredSU = Pred.getSUnit();
       if (SUNumbers[PredSU->NodeNum] == 0) {
 #ifndef NDEBUG
         // In debug mode, check that we don't have such an element in the stack.
-        for (auto It : WorkList)
+        for (const auto &It : WorkList)
           assert(It.SU != PredSU && "Trying to push an element twice?");
 #endif
         // Next time, start processing this one from the next pred.
@@ -1998,7 +1979,8 @@
     unsigned SethiUllmanNumber = 0;
     unsigned Extra = 0;
     for (const SDep &Pred : TempSU->Preds) {
-      if (Pred.isCtrl()) continue;  // ignore chain preds
+      if (Pred.isCtrl())
+        continue; // ignore chain preds
       SUnit *PredSU = Pred.getSUnit();
       unsigned PredSethiUllman = SUNumbers[PredSU->NodeNum];
       assert(PredSethiUllman > 0 && "We should have evaluated this pred!");
@@ -2020,25 +2002,25 @@
   return SUNumbers[SU->NodeNum];
 }
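
// Illustrative aside (not part of this patch): the classic Sethi-Ullman
// recurrence, shown standalone on a plain binary expression tree with a
// hypothetical Node type. The SUnit version above runs the same idea
// iteratively over non-chain predecessors (with some Extra bookkeeping for
// ties), but the intuition is identical: the number is the register demand,
// so a smaller number means fewer values are kept live at once.

#include <algorithm>

struct Node {
  Node *Left = nullptr, *Right = nullptr; // both null for a leaf
};

// Registers needed to evaluate N without spilling.
static unsigned sethiUllman(const Node *N) {
  if (!N->Left && !N->Right)
    return 1; // a leaf value occupies one register
  if (!N->Left || !N->Right) // unary node: same demand as its child
    return sethiUllman(N->Left ? N->Left : N->Right);
  unsigned L = sethiUllman(N->Left);
  unsigned R = sethiUllman(N->Right);
  // Equal demands force an extra register to hold the first result while
  // the second subtree is evaluated.
  return L == R ? L + 1 : std::max(L, R);
}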
 
-/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
+/// calculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
 /// scheduling units.
-void RegReductionPQBase::CalculateSethiUllmanNumbers() {
+void RegReductionPQBase::calculateSethiUllmanNumbers() {
   SethiUllmanNumbers.assign(SUnits->size(), 0);
 
   for (const SUnit &SU : *SUnits)
-    CalcNodeSethiUllmanNumber(&SU, SethiUllmanNumbers);
+    calcNodeSethiUllmanNumber(&SU, SethiUllmanNumbers);
 }
 
 void RegReductionPQBase::addNode(const SUnit *SU) {
   unsigned SUSize = SethiUllmanNumbers.size();
   if (SUnits->size() > SUSize)
-    SethiUllmanNumbers.resize(SUSize*2, 0);
-  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
+    SethiUllmanNumbers.resize(SUSize * 2, 0);
+  calcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
 }
 
 void RegReductionPQBase::updateNode(const SUnit *SU) {
   SethiUllmanNumbers[SU->NodeNum] = 0;
-  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
+  calcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
 }
 
 // Lower priority means schedule further down. For bottom-up scheduling, lower
@@ -2051,15 +2033,14 @@
     // avoid spilling.
     return 0;
   if (Opc == TargetOpcode::EXTRACT_SUBREG ||
-      Opc == TargetOpcode::SUBREG_TO_REG ||
-      Opc == TargetOpcode::INSERT_SUBREG)
+      Opc == TargetOpcode::SUBREG_TO_REG || Opc == TargetOpcode::INSERT_SUBREG)
     // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
     // close to their uses to facilitate coalescing.
     return 0;
   if (SU->NumSuccs == 0 && SU->NumPreds != 0)
     // If SU does not have a register use, i.e. it doesn't produce a value
     // that would be consumed (e.g. store), then it terminates a chain of
-    // computation.  Give it a large SethiUllman number so it will be
+    // computation.  Give it a large Sethi-Ullman number, so it will be
     // scheduled right before its predecessors and won't lengthen
     // their live ranges.
     return 0xffff;
@@ -2089,14 +2070,15 @@
   for (const TargetRegisterClass *RC : TRI->regclasses()) {
     unsigned Id = RC->getID();
     unsigned RP = RegPressure[Id];
-    if (!RP) continue;
+    if (!RP)
+      continue;
     LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << ": " << RP << " / "
                       << RegLimit[Id] << '\n');
   }
 }
 #endif
 
-bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
+bool RegReductionPQBase::highRegPressure(const SUnit *SU) const {
   if (!TLI)
     return false;
 
@@ -2109,10 +2091,10 @@
     if (PredSU->NumRegDefsLeft == 0) {
       continue;
     }
-    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
+    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, ScheduleDAG);
          RegDefPos.IsValid(); RegDefPos.Advance()) {
       unsigned RCId, Cost;
-      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
+      getCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
 
       if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
         return true;
@@ -2121,16 +2103,16 @@
   return false;
 }
 
-bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
+bool RegReductionPQBase::mayReduceRegPressure(const SUnit *SU) const {
   const SDNode *N = SU->getNode();
 
   if (!N->isMachineOpcode() || !SU->NumSuccs)
     return false;
 
   unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
-  for (unsigned i = 0; i != NumDefs; ++i) {
-    MVT VT = N->getSimpleValueType(i);
-    if (!N->hasAnyUseOfValue(i))
+  for (unsigned I = 0; I != NumDefs; ++I) {
+    MVT VT = N->getSimpleValueType(I);
+    if (!N->hasAnyUseOfValue(I))
       continue;
     unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
     if (RegPressure[RCId] >= RegLimit[RCId])
@@ -2146,7 +2128,8 @@
 //
 // FIXME: This encompasses the logic in highRegPressure and
 // mayReduceRegPressure, so it could probably be factored.
-int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
+int RegReductionPQBase::regPressureDiff(const SUnit *SU,
+                                        unsigned &LiveUses) const {
   LiveUses = 0;
   int PDiff = 0;
   for (const SDep &Pred : SU->Preds) {
@@ -2160,7 +2143,7 @@
         ++LiveUses;
       continue;
     }
-    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
+    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, ScheduleDAG);
          RegDefPos.IsValid(); RegDefPos.Advance()) {
       MVT VT = RegDefPos.GetValue();
       unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
@@ -2174,9 +2157,9 @@
     return PDiff;
 
   unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
-  for (unsigned i = 0; i != NumDefs; ++i) {
-    MVT VT = N->getSimpleValueType(i);
-    if (!N->hasAnyUseOfValue(i))
+  for (unsigned I = 0; I != NumDefs; ++I) {
+    MVT VT = N->getSimpleValueType(I);
+    if (!N->hasAnyUseOfValue(I))
       continue;
     unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
     if (RegPressure[RCId] >= RegLimit[RCId])
@@ -2213,18 +2196,18 @@
     //
     // The most important aspect of register tracking is balancing the increase
     // here with the reduction further below. Note that this SU may use multiple
-    // defs in PredSU. The can't be determined here, but we've already
+    // defs in PredSU. This can't be determined here, but we've already
     // compensated by reducing NumRegDefsLeft in PredSU during
     // ScheduleDAGSDNodes::AddSchedEdges.
     --PredSU->NumRegDefsLeft;
     unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
-    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
+    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, ScheduleDAG);
          RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
       if (SkipRegDefs)
         continue;
 
       unsigned RCId, Cost;
-      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
+      getCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
       RegPressure[RCId] += Cost;
       break;
     }
@@ -2232,22 +2215,21 @@
 
   // We should have this assert, but there may be dead SDNodes that never
   // materialize as SUnits, so they don't appear to generate liveness.
-  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
+  // assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
   int SkipRegDefs = (int)SU->NumRegDefsLeft;
-  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
+  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, ScheduleDAG);
        RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
     if (SkipRegDefs > 0)
       continue;
     unsigned RCId, Cost;
-    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
+    getCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
     if (RegPressure[RCId] < Cost) {
       // Register pressure tracking is imprecise. This can happen. But we try
       // hard not to let it happen because it likely results in poor scheduling.
       LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum
                         << ") has too many regdefs\n");
       RegPressure[RCId] = 0;
-    }
-    else {
+    } else {
       RegPressure[RCId] -= Cost;
     }
   }
@@ -2259,7 +2241,8 @@
     return;
 
   const SDNode *N = SU->getNode();
-  if (!N) return;
+  if (!N)
+    return;
 
   if (!N->isMachineOpcode()) {
     if (N->getOpcode() != ISD::CopyToReg)
@@ -2269,8 +2252,7 @@
     if (Opc == TargetOpcode::EXTRACT_SUBREG ||
         Opc == TargetOpcode::INSERT_SUBREG ||
         Opc == TargetOpcode::SUBREG_TO_REG ||
-        Opc == TargetOpcode::REG_SEQUENCE ||
-        Opc == TargetOpcode::IMPLICIT_DEF)
+        Opc == TargetOpcode::REG_SEQUENCE || Opc == TargetOpcode::IMPLICIT_DEF)
       return;
   }
 
@@ -2303,9 +2285,9 @@
       continue;
     }
     unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
-    for (unsigned i = 0; i != NumDefs; ++i) {
-      MVT VT = PN->getSimpleValueType(i);
-      if (!PN->hasAnyUseOfValue(i))
+    for (unsigned I = 0; I != NumDefs; ++I) {
+      MVT VT = PN->getSimpleValueType(I);
+      if (!PN->hasAnyUseOfValue(I))
         continue;
       unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
       if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
@@ -2316,15 +2298,15 @@
     }
   }
 
-  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
+  // Check for isMachineOpcode() as preScheduleNodesWithMultipleUses()
   // may transfer data dependencies to CopyToReg.
   if (SU->NumSuccs && N->isMachineOpcode()) {
     unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
-    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
-      MVT VT = N->getSimpleValueType(i);
+    for (unsigned I = NumDefs, E = N->getNumValues(); I != E; ++I) {
+      MVT VT = N->getSimpleValueType(I);
       if (VT == MVT::Glue || VT == MVT::Other)
         continue;
-      if (!N->hasAnyUseOfValue(i))
+      if (!N->hasAnyUseOfValue(I))
         continue;
       unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
       RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
@@ -2343,13 +2325,14 @@
 static unsigned closestSucc(const SUnit *SU) {
   unsigned MaxHeight = 0;
   for (const SDep &Succ : SU->Succs) {
-    if (Succ.isCtrl()) continue;  // ignore chain succs
+    if (Succ.isCtrl())
+      continue; // ignore chain succs
     unsigned Height = Succ.getSUnit()->getHeight();
-    // If there are bunch of CopyToRegs stacked up, they should be considered
+    // If several CopyToRegs are stacked up, they should be considered
     // to be at the same position.
     if (Succ.getSUnit()->getNode() &&
         Succ.getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
-      Height = closestSucc(Succ.getSUnit())+1;
+      Height = closestSucc(Succ.getSUnit()) + 1;
     if (Height > MaxHeight)
       MaxHeight = Height;
   }
@@ -2361,7 +2344,8 @@
 static unsigned calcMaxScratches(const SUnit *SU) {
   unsigned Scratches = 0;
   for (const SDep &Pred : SU->Preds) {
-    if (Pred.isCtrl()) continue;  // ignore chain preds
+    if (Pred.isCtrl())
+      continue; // ignore chain preds
     Scratches++;
   }
   return Scratches;
@@ -2372,12 +2356,13 @@
 static bool hasOnlyLiveInOpers(const SUnit *SU) {
   bool RetVal = false;
   for (const SDep &Pred : SU->Preds) {
-    if (Pred.isCtrl()) continue;
+    if (Pred.isCtrl())
+      continue;
     const SUnit *PredSU = Pred.getSUnit();
     if (PredSU->getNode() &&
         PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
-      unsigned Reg =
-        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
+      Register Reg =
+          cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
       if (Register::isVirtualRegister(Reg)) {
         RetVal = true;
         continue;
@@ -2394,11 +2379,12 @@
 static bool hasOnlyLiveOutUses(const SUnit *SU) {
   bool RetVal = false;
   for (const SDep &Succ : SU->Succs) {
-    if (Succ.isCtrl()) continue;
+    if (Succ.isCtrl())
+      continue;
     const SUnit *SuccSU = Succ.getSUnit();
     if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
-      unsigned Reg =
-        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
+      Register Reg =
+          cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
       if (Register::isVirtualRegister(Reg)) {
         RetVal = true;
         continue;
@@ -2431,7 +2417,8 @@
   SU->isVRegCycle = true;
 
   for (const SDep &Pred : SU->Preds) {
-    if (Pred.isCtrl()) continue;
+    if (Pred.isCtrl())
+      continue;
     Pred.getSUnit()->isVRegCycle = true;
   }
 }
@@ -2443,7 +2430,8 @@
     return;
 
   for (const SDep &Pred : SU->Preds) {
-    if (Pred.isCtrl()) continue;  // ignore chain preds
+    if (Pred.isCtrl())
+      continue; // ignore chain preds
     SUnit *PredSU = Pred.getSUnit();
     if (PredSU->isVRegCycle) {
       assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
@@ -2461,7 +2449,8 @@
     return false;
 
   for (const SDep &Pred : SU->Preds) {
-    if (Pred.isCtrl()) continue;  // ignore chain preds
+    if (Pred.isCtrl())
+      continue; // ignore chain preds
     if (Pred.getSUnit()->isVRegCycle &&
         Pred.getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
       LLVM_DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
@@ -2474,29 +2463,30 @@
 // Check for either a dependence (latency) or resource (hazard) stall.
 //
 // Note: The ScheduleHazardRecognizer interface requires a non-const SU.
-static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
-  if ((int)SPQ->getCurCycle() < Height) return true;
-  if (SPQ->getHazardRec()->getHazardType(SU, 0)
-      != ScheduleHazardRecognizer::NoHazard)
+static bool buHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
+  if ((int)SPQ->getCurCycle() < Height)
+    return true;
+  if (SPQ->getHazardRec()->getHazardType(SU, 0) !=
+      ScheduleHazardRecognizer::NoHazard)
     return true;
   return false;
 }
 
 // Return -1 if left has higher priority, 1 if right has higher priority.
 // Return 0 if latency-based priority is equivalent.
-static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
+static int buCompareLatency(SUnit *Left, SUnit *Right, bool CheckPref,
                             RegReductionPQBase *SPQ) {
   // Scheduling an instruction that uses a VReg whose postincrement has not yet
   // been scheduled will induce a copy. Model this as an extra cycle of latency.
-  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
-  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
-  int LHeight = (int)left->getHeight() + LPenalty;
-  int RHeight = (int)right->getHeight() + RPenalty;
+  int LPenalty = hasVRegCycleUse(Left) ? 1 : 0;
+  int RPenalty = hasVRegCycleUse(Right) ? 1 : 0;
+  int LHeight = (int)Left->getHeight() + LPenalty;
+  int RHeight = (int)Right->getHeight() + RPenalty;
 
-  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
-    BUHasStall(left, LHeight, SPQ);
-  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
-    BUHasStall(right, RHeight, SPQ);
+  bool LStall = (!CheckPref || Left->SchedulingPref == Sched::ILP) &&
+                buHasStall(Left, LHeight, SPQ);
+  bool RStall = (!CheckPref || Right->SchedulingPref == Sched::ILP) &&
+                buHasStall(Right, RHeight, SPQ);
 
   // If scheduling one of the nodes will cause a pipeline stall, delay it.
   // If scheduling either one of the nodes will cause a pipeline stall, sort
@@ -2511,8 +2501,8 @@
 
   // If either node is scheduling for latency, sort them by height/depth
   // and latency.
-  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
-                     right->SchedulingPref == Sched::ILP)) {
+  if (!CheckPref || (Left->SchedulingPref == Sched::ILP ||
+                     Right->SchedulingPref == Sched::ILP)) {
     // If neither instruction stalls (!LStall && !RStall) and HazardRecognizer
     // is enabled, grouping instructions by cycle, then its height is already
     // covered so only its depth matters. We also reach this point if both stall
@@ -2521,52 +2511,52 @@
       if (LHeight != RHeight)
         return LHeight > RHeight ? 1 : -1;
     }
-    int LDepth = left->getDepth() - LPenalty;
-    int RDepth = right->getDepth() - RPenalty;
+    int LDepth = Left->getDepth() - LPenalty;
+    int RDepth = Right->getDepth() - RPenalty;
     if (LDepth != RDepth) {
-      LLVM_DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
-                        << ") depth " << LDepth << " vs SU (" << right->NodeNum
+      LLVM_DEBUG(dbgs() << "  Comparing latency of SU (" << Left->NodeNum
+                        << ") depth " << LDepth << " vs SU (" << Right->NodeNum
                         << ") depth " << RDepth << "\n");
       return LDepth < RDepth ? 1 : -1;
     }
-    if (left->Latency != right->Latency)
-      return left->Latency > right->Latency ? 1 : -1;
+    if (Left->Latency != Right->Latency)
+      return Left->Latency > Right->Latency ? 1 : -1;
   }
   return 0;
 }
 
-static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
+static bool buRRSort(SUnit *Left, SUnit *Right, RegReductionPQBase *SPQ) {
   // Schedule physical register definitions close to their use. This is
   // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
   // long as shortening physreg live ranges is generally good, we can defer
   // creating a subtarget hook.
   if (!DisableSchedPhysRegJoin) {
-    bool LHasPhysReg = left->hasPhysRegDefs;
-    bool RHasPhysReg = right->hasPhysRegDefs;
+    bool LHasPhysReg = Left->hasPhysRegDefs;
+    bool RHasPhysReg = Right->hasPhysRegDefs;
     if (LHasPhysReg != RHasPhysReg) {
-      #ifndef NDEBUG
-      static const char *const PhysRegMsg[] = { " has no physreg",
-                                                " defines a physreg" };
-      #endif
-      LLVM_DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
-                        << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum
+#ifndef NDEBUG
+      static const char *const PhysRegMsg[] = {" has no physreg",
+                                               " defines a physreg"};
+#endif
+      LLVM_DEBUG(dbgs() << "  SU (" << Left->NodeNum << ") "
+                        << PhysRegMsg[LHasPhysReg] << " SU(" << Right->NodeNum
                         << ") " << PhysRegMsg[RHasPhysReg] << "\n");
       return LHasPhysReg < RHasPhysReg;
     }
   }
 
   // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
-  unsigned LPriority = SPQ->getNodePriority(left);
-  unsigned RPriority = SPQ->getNodePriority(right);
+  unsigned LPriority = SPQ->getNodePriority(Left);
+  unsigned RPriority = SPQ->getNodePriority(Right);
 
   // Be really careful about hoisting call operands above previous calls.
   // Only allow it if it would reduce register pressure.
-  if (left->isCall && right->isCallOp) {
-    unsigned RNumVals = right->getNode()->getNumValues();
+  if (Left->isCall && Right->isCallOp) {
+    unsigned RNumVals = Right->getNode()->getNumValues();
     RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
   }
-  if (right->isCall && left->isCallOp) {
-    unsigned LNumVals = left->getNode()->getNumValues();
+  if (Right->isCall && Left->isCallOp) {
+    unsigned LNumVals = Left->getNode()->getNumValues();
     LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
   }
 
@@ -2575,9 +2565,9 @@
 
   // If one or both of the nodes are calls and their Sethi-Ullman numbers
   // are the same, keep the source order.
-  if (left->isCall || right->isCall) {
-    unsigned LOrder = SPQ->getNodeOrdering(left);
-    unsigned ROrder = SPQ->getNodeOrdering(right);
+  if (Left->isCall || Right->isCall) {
+    unsigned LOrder = SPQ->getNodeOrdering(Left);
+    unsigned ROrder = SPQ->getNodeOrdering(Right);
 
     // Prefer an ordering where the lower the non-zero order number, the higher
     // the preference.
@@ -2602,122 +2592,123 @@
   // t3 = op t4, c2
   //
   // This creates more short live intervals.
-  unsigned LDist = closestSucc(left);
-  unsigned RDist = closestSucc(right);
+  unsigned LDist = closestSucc(Left);
+  unsigned RDist = closestSucc(Right);
   if (LDist != RDist)
     return LDist < RDist;
 
   // How many registers become live when the node is scheduled.
-  unsigned LScratch = calcMaxScratches(left);
-  unsigned RScratch = calcMaxScratches(right);
+  unsigned LScratch = calcMaxScratches(Left);
+  unsigned RScratch = calcMaxScratches(Right);
   if (LScratch != RScratch)
     return LScratch > RScratch;
 
   // Comparing latency against a call makes little sense unless the node
   // is register pressure-neutral.
-  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
-    return (left->NodeQueueId > right->NodeQueueId);
+  if ((Left->isCall && RPriority > 0) || (Right->isCall && LPriority > 0))
+    return (Left->NodeQueueId > Right->NodeQueueId);
 
   // Do not compare latencies when one or both of the nodes are calls.
-  if (!DisableSchedCycles &&
-      !(left->isCall || right->isCall)) {
-    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
-    if (result != 0)
-      return result > 0;
-  }
-  else {
-    if (left->getHeight() != right->getHeight())
-      return left->getHeight() > right->getHeight();
+  if (!DisableSchedCycles && !(Left->isCall || Right->isCall)) {
+    int Result = buCompareLatency(Left, Right, false /*checkPref*/, SPQ);
+    if (Result != 0)
+      return Result > 0;
+  } else {
+    if (Left->getHeight() != Right->getHeight())
+      return Left->getHeight() > Right->getHeight();
 
-    if (left->getDepth() != right->getDepth())
-      return left->getDepth() < right->getDepth();
+    if (Left->getDepth() != Right->getDepth())
+      return Left->getDepth() < Right->getDepth();
   }
 
-  assert(left->NodeQueueId && right->NodeQueueId &&
+  assert(Left->NodeQueueId && Right->NodeQueueId &&
          "NodeQueueId cannot be zero");
-  return (left->NodeQueueId > right->NodeQueueId);
+  return (Left->NodeQueueId > Right->NodeQueueId);
 }
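
// Illustrative aside (not part of this patch): the comparator convention
// these Picker functors share is that operator()(Left, Right) == true means
// Right should be popped (scheduled) first. A linear scan in the style of
// popFromQueueImpl above then keeps the best candidate seen so far. The
// Item type here is hypothetical.

#include <vector>

struct Item {
  unsigned Priority = 0;
};

struct ToyPicker {
  // True => Right wins, i.e. Right has higher scheduling priority.
  bool operator()(const Item &Left, const Item &Right) const {
    return Right.Priority > Left.Priority;
  }
};

static Item popBest(std::vector<Item> &Q) { // Q must be non-empty
  ToyPicker Picker;
  size_t BestIdx = 0;
  for (size_t I = 1, E = Q.size(); I != E; ++I)
    if (Picker(Q[BestIdx], Q[I])) // the current best loses to Q[I]
      BestIdx = I;
  Item Best = Q[BestIdx];
  Q[BestIdx] = Q.back(); // swap-and-pop keeps removal O(1)
  Q.pop_back();
  return Best;
}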
 
 // Bottom up
-bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
-  if (int res = checkSpecialNodes(left, right))
-    return res > 0;
+bool bu_ls_rr_sort::operator()(SUnit *Left, SUnit *Right) const {
+  if (int Res = checkSpecialNodes(Left, Right))
+    return Res > 0;
 
-  return BURRSort(left, right, SPQ);
+  return buRRSort(Left, Right, SPQ);
 }
 
 // Source order, otherwise bottom up.
-bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
-  if (int res = checkSpecialNodes(left, right))
-    return res > 0;
+bool src_ls_rr_sort::operator()(SUnit *Left, SUnit *Right) const {
+  if (int Res = checkSpecialNodes(Left, Right))
+    return Res > 0;
 
-  unsigned LOrder = SPQ->getNodeOrdering(left);
-  unsigned ROrder = SPQ->getNodeOrdering(right);
+  unsigned LOrder = SPQ->getNodeOrdering(Left);
+  unsigned ROrder = SPQ->getNodeOrdering(Right);
 
   // Prefer an ordering where the lower the non-zero order number, the higher
   // the preference.
   if ((LOrder || ROrder) && LOrder != ROrder)
     return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
 
-  return BURRSort(left, right, SPQ);
+  return buRRSort(Left, Right, SPQ);
 }
 
 // If the time between now and when the instruction will be ready can cover
 // the spill code, then avoid adding it to the ready queue. This gives long
-// stalls highest priority and allows hoisting across calls. It should also
+// stalls the highest priority and allows hoisting across calls. It should also
 // speed up processing the available queue.
 bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
   static const unsigned ReadyDelay = 3;
 
-  if (SPQ->MayReduceRegPressure(SU)) return true;
+  if (SPQ->mayReduceRegPressure(SU))
+    return true;
 
-  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
+  if (SU->getHeight() > (CurCycle + ReadyDelay))
+    return false;
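+  // (With ReadyDelay = 3, for example, a node of height 10 thus stays out of
+  // the ready queue until CurCycle reaches 7, leaving slack to cover the
+  // spill code mentioned above.)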
 
-  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
-      != ScheduleHazardRecognizer::NoHazard)
+  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay) !=
+      ScheduleHazardRecognizer::NoHazard)
     return false;
 
   return true;
 }
 
 // Return true if right should be scheduled with higher priority than left.
-bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
-  if (int res = checkSpecialNodes(left, right))
-    return res > 0;
+bool hybrid_ls_rr_sort::operator()(SUnit *Left, SUnit *Right) const {
+  if (int Res = checkSpecialNodes(Left, Right))
+    return Res > 0;
 
-  if (left->isCall || right->isCall)
+  if (Left->isCall || Right->isCall)
     // No way to compute latency of calls.
-    return BURRSort(left, right, SPQ);
+    return buRRSort(Left, Right, SPQ);
 
-  bool LHigh = SPQ->HighRegPressure(left);
-  bool RHigh = SPQ->HighRegPressure(right);
+  bool LHigh = SPQ->highRegPressure(Left);
+  bool RHigh = SPQ->highRegPressure(Right);
   // Avoid causing spills. If register pressure is high, schedule for
   // register pressure reduction.
   if (LHigh && !RHigh) {
-    LLVM_DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
-                      << right->NodeNum << ")\n");
+    LLVM_DEBUG(dbgs() << "  pressure SU(" << Left->NodeNum << ") > SU("
+                      << Right->NodeNum << ")\n");
     return true;
   }
-  else if (!LHigh && RHigh) {
-    LLVM_DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
-                      << left->NodeNum << ")\n");
+  if (!LHigh && RHigh) {
+    LLVM_DEBUG(dbgs() << "  pressure SU(" << Right->NodeNum << ") > SU("
+                      << Left->NodeNum << ")\n");
     return false;
   }
   if (!LHigh && !RHigh) {
-    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
-    if (result != 0)
-      return result > 0;
+    int Result = buCompareLatency(Left, Right, true /*checkPref*/, SPQ);
+    if (Result != 0)
+      return Result > 0;
   }
-  return BURRSort(left, right, SPQ);
+  return buRRSort(Left, Right, SPQ);
 }
 
 // Schedule as many instructions in each cycle as possible. So don't make an
 // instruction available unless it is ready in the current cycle.
 bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
-  if (SU->getHeight() > CurCycle) return false;
+  if (SU->getHeight() > CurCycle)
+    return false;
 
-  if (SPQ->getHazardRec()->getHazardType(SU, 0)
-      != ScheduleHazardRecognizer::NoHazard)
+  if (SPQ->getHazardRec()->getHazardType(SU, 0) !=
+      ScheduleHazardRecognizer::NoHazard)
     return false;
 
   return true;
@@ -2731,8 +2722,7 @@
     return true;
 
   if (Opc == TargetOpcode::EXTRACT_SUBREG ||
-      Opc == TargetOpcode::SUBREG_TO_REG ||
-      Opc == TargetOpcode::INSERT_SUBREG)
+      Opc == TargetOpcode::SUBREG_TO_REG || Opc == TargetOpcode::INSERT_SUBREG)
     // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
     // close to their uses to facilitate coalescing.
     return true;
@@ -2747,81 +2737,83 @@
 
 // list-ilp is currently an experimental scheduler that allows various
 // heuristics to be enabled prior to the normal register reduction logic.
-bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
-  if (int res = checkSpecialNodes(left, right))
-    return res > 0;
+bool ilp_ls_rr_sort::operator()(SUnit *Left, SUnit *Right) const {
+  if (int Res = checkSpecialNodes(Left, Right))
+    return Res > 0;
 
-  if (left->isCall || right->isCall)
+  if (Left->isCall || Right->isCall)
     // No way to compute latency of calls.
-    return BURRSort(left, right, SPQ);
+    return buRRSort(Left, Right, SPQ);
 
   unsigned LLiveUses = 0, RLiveUses = 0;
   int LPDiff = 0, RPDiff = 0;
   if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
-    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
-    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
+    LPDiff = SPQ->regPressureDiff(Left, LLiveUses);
+    RPDiff = SPQ->regPressureDiff(Right, RLiveUses);
   }
   if (!DisableSchedRegPressure && LPDiff != RPDiff) {
-    LLVM_DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum
-                      << "): " << LPDiff << " != SU(" << right->NodeNum
+    LLVM_DEBUG(dbgs() << "regPressureDiff SU(" << Left->NodeNum
+                      << "): " << LPDiff << " != SU(" << Right->NodeNum
                       << "): " << RPDiff << "\n");
     return LPDiff > RPDiff;
   }
 
   if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
-    bool LReduce = canEnableCoalescing(left);
-    bool RReduce = canEnableCoalescing(right);
-    if (LReduce && !RReduce) return false;
-    if (RReduce && !LReduce) return true;
+    bool LReduce = canEnableCoalescing(Left);
+    bool RReduce = canEnableCoalescing(Right);
+    if (LReduce && !RReduce)
+      return false;
+    if (RReduce && !LReduce)
+      return true;
   }
 
   if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
-    LLVM_DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
-                      << " != SU(" << right->NodeNum << "): " << RLiveUses
+    LLVM_DEBUG(dbgs() << "Live uses SU(" << Left->NodeNum << "): " << LLiveUses
+                      << " != SU(" << Right->NodeNum << "): " << RLiveUses
                       << "\n");
     return LLiveUses < RLiveUses;
   }
 
   if (!DisableSchedStalls) {
-    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
-    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
+    bool LStall = buHasStall(Left, Left->getHeight(), SPQ);
+    bool RStall = buHasStall(Right, Right->getHeight(), SPQ);
     if (LStall != RStall)
-      return left->getHeight() > right->getHeight();
+      return Left->getHeight() > Right->getHeight();
   }
 
   if (!DisableSchedCriticalPath) {
-    int spread = (int)left->getDepth() - (int)right->getDepth();
-    if (std::abs(spread) > MaxReorderWindow) {
-      LLVM_DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
-                        << left->getDepth() << " != SU(" << right->NodeNum
-                        << "): " << right->getDepth() << "\n");
-      return left->getDepth() < right->getDepth();
+    int Spread = (int)Left->getDepth() - (int)Right->getDepth();
+    if (std::abs(Spread) > MaxReorderWindow) {
+      LLVM_DEBUG(dbgs() << "Depth of SU(" << Left->NodeNum << "): "
+                        << Left->getDepth() << " != SU(" << Right->NodeNum
+                        << "): " << Right->getDepth() << "\n");
+      return Left->getDepth() < Right->getDepth();
     }
   }
 
-  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
-    int spread = (int)left->getHeight() - (int)right->getHeight();
-    if (std::abs(spread) > MaxReorderWindow)
-      return left->getHeight() > right->getHeight();
+  if (!DisableSchedHeight && Left->getHeight() != Right->getHeight()) {
+    int Spread = (int)Left->getHeight() - (int)Right->getHeight();
+    if (std::abs(Spread) > MaxReorderWindow)
+      return Left->getHeight() > Right->getHeight();
   }
 
-  return BURRSort(left, right, SPQ);
+  return buRRSort(Left, Right, SPQ);
 }
 
-void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
-  SUnits = &sunits;
+void RegReductionPQBase::initNodes(std::vector<SUnit> &SUs) {
+  SUnits = &SUs;
   // Add pseudo dependency edges for two-address nodes.
   if (!Disable2AddrHack)
-    AddPseudoTwoAddrDeps();
+    addPseudoTwoAddrDeps();
   // Reroute edges to nodes with multiple uses.
   if (!TracksRegPressure && !SrcOrder)
-    PrescheduleNodesWithMultipleUses();
+    preScheduleNodesWithMultipleUses();
   // Calculate node priorities.
-  CalculateSethiUllmanNumbers();
+  calculateSethiUllmanNumbers();
 
   // For single-block loops, mark nodes that look like canonical IV increments.
-  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB))
-    for (SUnit &SU : sunits)
+  if (ScheduleDAG->BB->isSuccessor(ScheduleDAG->BB))
+    for (SUnit &SU : SUs)
       initVRegCycle(&SU);
 }
 
@@ -2835,9 +2827,9 @@
     const MCInstrDesc &MCID = TII->get(Opc);
     unsigned NumRes = MCID.getNumDefs();
     unsigned NumOps = MCID.getNumOperands() - NumRes;
-    for (unsigned i = 0; i != NumOps; ++i) {
-      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
-        SDNode *DU = SU->getNode()->getOperand(i).getNode();
+    for (unsigned I = 0; I != NumOps; ++I) {
+      if (MCID.getOperandConstraint(I + NumRes, MCOI::TIED_TO) != -1) {
+        SDNode *DU = SU->getNode()->getOperand(I).getNode();
         if (DU->getNodeId() != -1 &&
             Op->OrigNode == &(*SUnits)[DU->getNodeId()])
           return true;
@@ -2851,13 +2843,13 @@
 /// successor's explicit physregs whose definition can reach DepSU.
 /// i.e. DepSU should not be scheduled above SU.
 static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
-                                         ScheduleDAGRRList *scheduleDAG,
+                                         ScheduleDAGRRList *ScheduleDAG,
                                          const TargetInstrInfo *TII,
                                          const TargetRegisterInfo *TRI) {
-  const MCPhysReg *ImpDefs
-    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
+  const MCPhysReg *ImpDefs =
+      TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
   const uint32_t *RegMask = getNodeRegMask(SU->getNode());
-  if(!ImpDefs && !RegMask)
+  if (!ImpDefs && !RegMask)
     return false;
 
   for (const SDep &Succ : SU->Succs) {
@@ -2868,7 +2860,7 @@
 
       if (RegMask &&
           MachineOperand::clobbersPhysReg(RegMask, SuccPred.getReg()) &&
-          scheduleDAG->IsReachable(DepSU, SuccPred.getSUnit()))
+          ScheduleDAG->isReachable(DepSU, SuccPred.getSUnit()))
         return true;
 
       if (ImpDefs)
@@ -2877,7 +2869,7 @@
           // definition of the register reaches from DepSU. isReachable queries
           // a topological forward sort of the DAG (following the successors).
           if (TRI->regsOverlap(*ImpDef, SuccPred.getReg()) &&
-              scheduleDAG->IsReachable(DepSU, SuccPred.getSUnit()))
+              ScheduleDAG->isReachable(DepSU, SuccPred.getSUnit()))
             return true;
     }
   }
@@ -2898,22 +2890,22 @@
     if (!SUNode->isMachineOpcode())
       continue;
     const MCPhysReg *SUImpDefs =
-      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
+        TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
     const uint32_t *SURegMask = getNodeRegMask(SUNode);
     if (!SUImpDefs && !SURegMask)
       continue;
-    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
-      MVT VT = N->getSimpleValueType(i);
+    for (unsigned I = NumDefs, E = N->getNumValues(); I != E; ++I) {
+      MVT VT = N->getSimpleValueType(I);
       if (VT == MVT::Glue || VT == MVT::Other)
         continue;
-      if (!N->hasAnyUseOfValue(i))
+      if (!N->hasAnyUseOfValue(I))
         continue;
-      unsigned Reg = ImpDefs[i - NumDefs];
+      unsigned Reg = ImpDefs[I - NumDefs];
       if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
         return true;
       if (!SUImpDefs)
         continue;
-      for (;*SUImpDefs; ++SUImpDefs) {
+      for (; *SUImpDefs; ++SUImpDefs) {
         unsigned SUReg = *SUImpDefs;
         if (TRI->regsOverlap(Reg, SUReg))
           return true;
@@ -2923,7 +2915,7 @@
   return false;
 }
 
-/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
+/// preScheduleNodesWithMultipleUses - Nodes with multiple uses
 /// are not handled well by the general register pressure reduction
 /// heuristics. When presented with code like this:
 ///
@@ -2953,7 +2945,7 @@
 /// This results in the store being scheduled immediately
 /// after N, which shortens the U->N live range, reducing
 /// register pressure.
-void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
+void RegReductionPQBase::preScheduleNodesWithMultipleUses() {
   // Visit all the nodes in topological order, working top-down.
   for (SUnit &SU : *SUnits) {
     // For now, only look at nodes with no data successors, such as stores.
@@ -2964,7 +2956,7 @@
     // For now, only look at nodes with exactly one data predecessor.
     if (SU.NumPreds != 1)
       continue;
-    // Avoid prescheduling copies to virtual registers, which don't behave
+    // Avoid pre-scheduling copies to virtual registers, which don't behave
     // like other nodes from the perspective of scheduling heuristics.
     if (SDNode *N = SU.getNode())
       if (N->getOpcode() == ISD::CopyToReg &&
@@ -2978,11 +2970,11 @@
         // Find the predecessor that is not a data dependence.
         SDNode *PredND = Pred.getSUnit()->getNode();
 
-        // If PredND is FrameSetup, we should not pre-scheduled the node,
+        // If PredND is FrameSetup, we should not pre-schedule the node,
         // or else, when scheduling bottom-up, ADJCALLSTACKDOWN and
         // ADJCALLSTACKUP may hold CallResource too long and prevent other
         // calls from being scheduled. If there's no other available node
-        // to schedule, the schedular will try to rename the register by
+        // to schedule, the scheduler will try to rename the register by
         // creating a copy to avoid the conflict, which will fail because
         // CallResource is not a real physical register.
         if (PredND && PredND->isMachineOpcode() &&
@@ -3011,7 +3003,7 @@
     // Short-circuit the case where SU is PredSU's only data successor.
     if (PredSU->NumSuccs == 1)
       continue;
-    // Avoid prescheduling to copies from virtual registers, which don't behave
+    // Avoid pre-scheduling to copies from virtual registers, which don't behave
     // like other nodes from the perspective of scheduling heuristics.
     if (SDNode *N = SU.getNode())
       if (N->getOpcode() == ISD::CopyFromReg &&
@@ -3022,7 +3014,8 @@
     // Perform checks on the successors of PredSU.
     for (const SDep &PredSucc : PredSU->Succs) {
       SUnit *PredSuccSU = PredSucc.getSUnit();
-      if (PredSuccSU == &SU) continue;
+      if (PredSuccSU == &SU)
+        continue;
       // If PredSU has another successor with no data successors, for
       // now don't attempt to choose either over the other.
       if (PredSuccSU->NumSuccs == 0)
@@ -3032,7 +3025,7 @@
         if (canClobberPhysRegDefs(PredSuccSU, &SU, TII, TRI))
           goto outer_loop_continue;
       // Don't introduce graph cycles.
-      if (scheduleDAG->IsReachable(&SU, PredSuccSU))
+      if (ScheduleDAG->isReachable(&SU, PredSuccSU))
         goto outer_loop_continue;
     }
 
@@ -3042,31 +3035,31 @@
         dbgs() << "    Prescheduling SU #" << SU.NodeNum << " next to PredSU #"
                << PredSU->NodeNum
                << " to guide scheduling in the presence of multiple uses\n");
-    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
-      SDep Edge = PredSU->Succs[i];
+    for (unsigned I = 0; I != PredSU->Succs.size(); ++I) {
+      SDep Edge = PredSU->Succs[I];
       assert(!Edge.isAssignedRegDep());
       SUnit *SuccSU = Edge.getSUnit();
       if (SuccSU != &SU) {
         Edge.setSUnit(PredSU);
-        scheduleDAG->RemovePred(SuccSU, Edge);
-        scheduleDAG->AddPredQueued(&SU, Edge);
+        ScheduleDAG->removePred(SuccSU, Edge);
+        ScheduleDAG->addPredQueued(&SU, Edge);
         Edge.setSUnit(&SU);
-        scheduleDAG->AddPredQueued(SuccSU, Edge);
-        --i;
+        ScheduleDAG->addPredQueued(SuccSU, Edge);
+        --I;
       }
     }
   outer_loop_continue:;
   }
 }
 
-/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
+/// addPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
 /// it as a def&use operand, add a pseudo control edge from it to the other
 /// node (if it won't create a cycle) so the two-address one will be scheduled
 /// first (lower in the schedule). If both nodes are two-address, favor the
 /// one that has a CopyToReg use (more likely to be a loop induction update).
 /// If both are two-address, but one is commutable while the other is not
 /// commutable, favor the one that's not commutable.
-void RegReductionPQBase::AddPseudoTwoAddrDeps() {
+void RegReductionPQBase::addPseudoTwoAddrDeps() {
   for (SUnit &SU : *SUnits) {
     if (!SU.isTwoAddress)
       continue;
@@ -3075,15 +3068,15 @@
     if (!Node || !Node->isMachineOpcode() || SU.getNode()->getGluedNode())
       continue;
 
-    bool isLiveOut = hasOnlyLiveOutUses(&SU);
+    bool LiveOut = hasOnlyLiveOutUses(&SU);
     unsigned Opc = Node->getMachineOpcode();
     const MCInstrDesc &MCID = TII->get(Opc);
     unsigned NumRes = MCID.getNumDefs();
     unsigned NumOps = MCID.getNumOperands() - NumRes;
-    for (unsigned j = 0; j != NumOps; ++j) {
-      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
+    for (unsigned J = 0; J != NumOps; ++J) {
+      if (MCID.getOperandConstraint(J + NumRes, MCOI::TIED_TO) == -1)
         continue;
-      SDNode *DU = SU.getNode()->getOperand(j).getNode();
+      SDNode *DU = SU.getNode()->getOperand(J).getNode();
       if (DU->getNodeId() == -1)
         continue;
       const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
@@ -3103,11 +3096,11 @@
         // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
         // constrains whatever is using the copy, instead of the copy
         // itself. In the case that the copy is coalesced, this
-        // preserves the intent of the pseudo two-address heurietics.
+        // preserves the intent of the pseudo two-address heuristics.
         while (SuccSU->Succs.size() == 1 &&
                SuccSU->getNode()->isMachineOpcode() &&
                SuccSU->getNode()->getMachineOpcode() ==
-                 TargetOpcode::COPY_TO_REGCLASS)
+                   TargetOpcode::COPY_TO_REGCLASS)
           SuccSU = SuccSU->Succs.front().getSUnit();
         // Don't constrain non-instruction nodes.
         if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
@@ -3125,15 +3118,15 @@
             SuccOpc == TargetOpcode::INSERT_SUBREG ||
             SuccOpc == TargetOpcode::SUBREG_TO_REG)
           continue;
-        if (!canClobberReachingPhysRegUse(SuccSU, &SU, scheduleDAG, TII, TRI) &&
+        if (!canClobberReachingPhysRegUse(SuccSU, &SU, ScheduleDAG, TII, TRI) &&
             (!canClobber(SuccSU, DUSU) ||
-             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
+             (LiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
              (!SU.isCommutable && SuccSU->isCommutable)) &&
-            !scheduleDAG->IsReachable(SuccSU, &SU)) {
+            !ScheduleDAG->isReachable(SuccSU, &SU)) {
           LLVM_DEBUG(dbgs()
                      << "    Adding a pseudo-two-addr edge from SU #"
                      << SU.NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
-          scheduleDAG->AddPredQueued(&SU, SDep(SuccSU, SDep::Artificial));
+          ScheduleDAG->addPredQueued(&SU, SDep(SuccSU, SDep::Artificial));
         }
       }
     }
@@ -3152,7 +3145,7 @@
   const TargetRegisterInfo *TRI = STI.getRegisterInfo();
 
   BURegReductionPriorityQueue *PQ =
-    new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
+      new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
   ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
   PQ->setScheduleDAG(SD);
   return SD;
@@ -3166,7 +3159,7 @@
   const TargetRegisterInfo *TRI = STI.getRegisterInfo();
 
   SrcRegReductionPriorityQueue *PQ =
-    new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
+      new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
   ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
   PQ->setScheduleDAG(SD);
   return SD;
@@ -3181,7 +3174,7 @@
   const TargetLowering *TLI = IS->TLI;
 
   HybridBURRPriorityQueue *PQ =
-    new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
+      new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
 
   ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
   PQ->setScheduleDAG(SD);
@@ -3197,7 +3190,7 @@
   const TargetLowering *TLI = IS->TLI;
 
   ILPBURRPriorityQueue *PQ =
-    new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
+      new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
   ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
   PQ->setScheduleDAG(SD);
   return SD;
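
// For reference (and from memory, so treat this as an assumption rather than
// part of the patch): these factories are what llc's -pre-RA-sched option maps
// onto; "list-burr" selects the plain bottom-up register-reduction queue,
// "source" the source-order variant, and "list-hybrid" / "list-ilp" the two
// pressure-aware variants constructed above.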