diff --git a/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h b/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h
--- a/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h
@@ -15,6 +15,7 @@
 
 namespace llvm {
 
+class AAResults;
 class AssumptionCache;
 class DominatorTree;
 class Loop;
@@ -31,9 +32,10 @@
 /// will give up. The flag IsUtilMode controls the heuristic used in the
 /// LoopRotation. If it is true, the profitability heuristic will be ignored.
 bool LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
-                  AssumptionCache *AC, DominatorTree *DT, ScalarEvolution *SE,
-                  MemorySSAUpdater *MSSAU, const SimplifyQuery &SQ,
-                  bool RotationOnly, unsigned Threshold, bool IsUtilMode,
+                  AAResults *AA, AssumptionCache *AC, DominatorTree *DT,
+                  ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
+                  const SimplifyQuery &SQ, bool RotationOnly,
+                  unsigned Threshold, bool IsUtilMode,
                   bool PrepareForLTO = false);
 
 } // namespace llvm
diff --git a/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/llvm/lib/Transforms/Scalar/LoopRotation.cpp
--- a/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -60,7 +60,7 @@
   if (AR.MSSA)
     MSSAU = MemorySSAUpdater(AR.MSSA);
   bool Changed =
-      LoopRotation(&L, &AR.LI, &AR.TTI, &AR.AC, &AR.DT, &AR.SE,
+      LoopRotation(&L, &AR.LI, &AR.TTI, &AR.AA, &AR.AC, &AR.DT, &AR.SE,
                    MSSAU.hasValue() ? MSSAU.getPointer() : nullptr, SQ, false,
                    Threshold, false, PrepareForLTO || PrepareForLTOOption);
 
@@ -110,6 +110,7 @@
 
     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
     const auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
     auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
@@ -129,7 +130,7 @@
                         ? DefaultRotationThreshold
                         : MaxHeaderSize;
 
-    return LoopRotation(L, LI, TTI, AC, &DT, &SE,
+    return LoopRotation(L, LI, TTI, AA, AC, &DT, &SE,
                         MSSAU.hasValue() ? MSSAU.getPointer() : nullptr, SQ,
                         false, Threshold, false,
                         PrepareForLTO || PrepareForLTOOption);
diff --git a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
--- a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Analysis/DomTreeUpdater.h"
 #include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/MemorySSA.h"
 #include "llvm/Analysis/MemorySSAUpdater.h"
@@ -59,6 +60,7 @@
   const unsigned MaxHeaderSize;
   LoopInfo *LI;
   const TargetTransformInfo *TTI;
+  AAResults *AA;
   AssumptionCache *AC;
   DominatorTree *DT;
   ScalarEvolution *SE;
@@ -70,12 +72,12 @@
 
 public:
   LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
-             const TargetTransformInfo *TTI, AssumptionCache *AC,
+             const TargetTransformInfo *TTI, AAResults *AA, AssumptionCache *AC,
              DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
              const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode,
              bool PrepareForLTO)
-      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
-        MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
+      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AA(AA), AC(AC), DT(DT),
+        SE(SE), MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
         IsUtilMode(IsUtilMode), PrepareForLTO(PrepareForLTO) {}
   bool processLoop(Loop *L);
 
@@ -250,6 +252,47 @@
   return false;
 }
 
+/// Return whether \p Inst is a load instruction that can safely be hoisted out
+/// of the body of loop \p L. Alias analysis results in \p AA are used to be
+/// less conservative with respect to potentially aliasing stores.
+static bool isInvariantLoad(Instruction *Inst, Loop *L, AAResults *AA) {
+  LoadInst *LI = dyn_cast<LoadInst>(Inst);
+  if (!LI)
+    return false;
+
+  if (!LI->isUnordered())
+    return false;
+
+  for (BasicBlock *ScanBB : L->getBlocks()) {
+    bool IsLoad;
+    unsigned NumScannedInst;
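+    // FindAvailableLoadedValue() scans backwards from ScanFrom, updating it as
+    // it goes, so each retry below resumes where the previous scan stopped.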
+    BasicBlock::iterator ScanFrom = ScanBB->end();
+
+    // Use FindAvailableLoadedValue() to find instructions that may write to
+    // the memory LI loads from, since it stops whenever such an instruction
+    // is encountered. Restart the scan when it stops because it found a load
+    // producing the same value, since such a load cannot affect the result.
+    Value *EquivalentValue;
+    do {
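+      // A MaxInstsToScan of 0 means there is no limit on the number of
+      // instructions scanned.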
+      unsigned MaxInstsToScan = 0;
+      EquivalentValue = FindAvailableLoadedValue(
+          LI, ScanBB, ScanFrom, MaxInstsToScan, AA, &IsLoad, &NumScannedInst);
+    } while (EquivalentValue && IsLoad);
+
+    // An equivalent value was found, but it is not provided by a load, so it
+    // must come from an instruction writing to that memory. Give up.
+    if (EquivalentValue)
+      return false;
+
+    // The scan stopped before reaching the beginning of the block, so some
+    // instruction clobbering the address we load from was found. Give up.
+    if (ScanFrom != ScanBB->begin())
+      return false;
+  }
+
+  return true;
+}
+
 /// Rotate loop LP. Return true if the loop is rotated.
 ///
 /// \param SimplifiedLatch is true if the latch was just folded into the final
@@ -414,13 +457,15 @@
     while (I != E) {
       Instruction *Inst = &*I++;
 
-      // If the instruction's operands are invariant and it doesn't read or write
-      // memory, then it is safe to hoist.  Doing this doesn't change the order of
-      // execution in the preheader, but does prevent the instruction from
-      // executing in each iteration of the loop.  This means it is safe to hoist
-      // something that might trap, but isn't safe to hoist something that reads
-      // memory (without proving that the loop doesn't write).
-      if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
+      // If the instruction's operands are invariant and it either doesn't
+      // read or write memory or is an invariant load, then it is safe to
+      // hoist. Doing this doesn't change the order of execution in the
+      // preheader, but does prevent the instruction from executing in each
+      // iteration of the loop. This means it is safe to hoist something that
+      // might trap, but isn't safe to hoist something that reads memory
+      // without proving that the loop doesn't write to it.
+      if (L->hasLoopInvariantOperands(Inst) &&
+          (!Inst->mayReadFromMemory() || isInvariantLoad(Inst, L, AA)) &&
           !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
           !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
         Inst->moveBefore(LoopEntryBranch);
@@ -822,12 +867,12 @@
 
 /// The utility to convert a loop into a loop with bottom test.
 bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
-                        AssumptionCache *AC, DominatorTree *DT,
+                        AAResults *AA, AssumptionCache *AC, DominatorTree *DT,
                         ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
                         const SimplifyQuery &SQ, bool RotationOnly = true,
                         unsigned Threshold = unsigned(-1),
                         bool IsUtilMode = true, bool PrepareForLTO) {
-  LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
+  LoopRotate LR(Threshold, LI, TTI, AA, AC, DT, SE, MSSAU, SQ, RotationOnly,
                 IsUtilMode, PrepareForLTO);
   return LR.processLoop(L);
 }
diff --git a/llvm/test/Transforms/LoopRotate/load-hoist.ll b/llvm/test/Transforms/LoopRotate/load-hoist.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopRotate/load-hoist.ll
@@ -0,0 +1,156 @@
+; RUN: opt -S -loop-rotate < %s -verify-loop-info -verify-dom-info | FileCheck %s
+; RUN: opt -S -loop-rotate < %s -verify-loop-info -verify-dom-info -enable-mssa-loop-dependency=true -verify-memoryssa | FileCheck %s
+
+; Invariant loads are hoisted if no aliasing occurs
+; CHECK-LABEL: @load(
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* %src, align 4
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[LD]], 100
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[IV]], 1
+; CHECK-NEXT:    store i32 0, i32* %dst, align 4
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[INC]], 100
+; CHECK-NEXT:    [[CMP:%.*]] = or i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_END:.*]]
+; CHECK:       [[FOR_END]]
+; CHECK-NEXT:    [[LD_LCSSA:%.*]] = phi i32 [ [[LD]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    ret i32 [[LD_LCSSA]]
+define i32 @load(i32* noalias %src, i32* noalias %dst) nounwind ssp {
+entry:
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.body, %entry
+  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %ld = load i32, i32* %src, align 4
+  %cmp1 = icmp slt i32 %i.0, 100
+  %cmp2 = icmp slt i32 %ld, 100
+  %cmp = or i1 %cmp1, %cmp2
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:                                         ; preds = %for.cond
+  %inc = add nsw i32 %i.0, 1
+  store i32 0, i32* %dst, align 4
+  br label %for.cond
+
+for.end:                                          ; preds = %for.cond
+  ret i32 %ld
+}
+
+; Atomic loads are not hoisted
+; CHECK-LABEL: @atomic_load(
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[LD_1:%.*]] = load atomic i32, i32* %src monotonic, align 4
+; CHECK-NEXT:    [[CMP2_1:%.*]] = icmp slt i32 [[LD_1]], 100
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[IV]], 1
+; CHECK-NEXT:    store i32 0, i32* %dst, align 4
+; CHECK-NEXT:    [[LD_2:%.*]] = load atomic i32, i32* %src monotonic, align 4
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[INC]], 100
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[LD_2]], 100
+; CHECK-NEXT:    [[CMP:%.*]] = or i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_END:.*]]
+; CHECK:       [[FOR_END]]
+; CHECK-NEXT:    [[LD_LCSSA:%.*]] = phi i32 [ [[LD_2]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    ret i32 [[LD_LCSSA]]
+define i32 @atomic_load(i32* noalias %src, i32* noalias %dst) nounwind ssp {
+entry:
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.body, %entry
+  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %ld = load atomic i32, i32* %src monotonic, align 4
+  %cmp1 = icmp slt i32 %i.0, 100
+  %cmp2 = icmp slt i32 %ld, 100
+  %cmp = or i1 %cmp1, %cmp2
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:                                         ; preds = %for.cond
+  %inc = add nsw i32 %i.0, 1
+  store i32 0, i32* %dst, align 4
+  br label %for.cond
+
+for.end:                                          ; preds = %for.cond
+  ret i32 %ld
+}
+
+; Invariant loads are not hoisted if a store may alias them
+; CHECK-LABEL: @store_aliasing_load(
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[LD_1:%.*]] = load i32, i32* %src, align 4
+; CHECK-NEXT:    [[CMP2_1:%.*]] = icmp slt i32 [[LD_1]], 100
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[IV]], 1
+; CHECK-NEXT:    store i32 0, i32* %dst, align 4
+; CHECK-NEXT:    [[LD_2:%.*]] = load i32, i32* %src, align 4
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[INC]], 100
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[LD_2]], 100
+; CHECK-NEXT:    [[CMP:%.*]] = or i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_END:.*]]
+; CHECK:       [[FOR_END]]
+; CHECK-NEXT:    [[LD_LCSSA:%.*]] = phi i32 [ [[LD_2]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    ret i32 [[LD_LCSSA]]
+define i32 @store_aliasing_load(i32* %src, i32* %dst) nounwind ssp {
+entry:
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.body, %entry
+  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %ld = load i32, i32* %src, align 4
+  %cmp1 = icmp slt i32 %i.0, 100
+  %cmp2 = icmp slt i32 %ld, 100
+  %cmp = or i1 %cmp1, %cmp2
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:                                         ; preds = %for.cond
+  %inc = add nsw i32 %i.0, 1
+  store i32 0, i32* %dst, align 4
+  br label %for.cond
+
+for.end:                                          ; preds = %for.cond
+  ret i32 %ld
+}
+
+; Invariant loads are not hoisted if an aliasing clobber may occur
+; CHECK-LABEL: @clobber_aliasing_load(
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[LD_1:%.*]] = load i32, i32* %src, align 4
+; CHECK-NEXT:    [[CMP2_1:%.*]] = icmp slt i32 [[LD_1]], 100
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[INC]] = add nsw i32 [[IV]], 1
+; CHECK-NEXT:    call void asm sideeffect "", ""()
+; CHECK-NEXT:    [[LD_2:%.*]] = load i32, i32* %src, align 4
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[INC]], 100
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[LD_2]], 100
+; CHECK-NEXT:    [[CMP:%.*]] = or i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_END:.*]]
+; CHECK:       [[FOR_END]]
+; CHECK-NEXT:    [[LD_LCSSA:%.*]] = phi i32 [ [[LD_2]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    ret i32 [[LD_LCSSA]]
+define i32 @clobber_aliasing_load(i32* %src, i32* %dst) nounwind ssp {
+entry:
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.body, %entry
+  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %ld = load i32, i32* %src, align 4
+  %cmp1 = icmp slt i32 %i.0, 100
+  %cmp2 = icmp slt i32 %ld, 100
+  %cmp = or i1 %cmp1, %cmp2
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:                                         ; preds = %for.cond
+  %inc = add nsw i32 %i.0, 1
+  call void asm sideeffect "", ""()
+  br label %for.cond
+
+for.end:                                          ; preds = %for.cond
+  ret i32 %ld
+}
diff --git a/llvm/test/Transforms/LoopRotate/loopexitinglatch.ll b/llvm/test/Transforms/LoopRotate/loopexitinglatch.ll
--- a/llvm/test/Transforms/LoopRotate/loopexitinglatch.ll
+++ b/llvm/test/Transforms/LoopRotate/loopexitinglatch.ll
@@ -24,8 +24,7 @@
 ; CHECK-NEXT:    [[PREV_0:%.*]] = phi %struct.List* [ [[CURR_04:%.*]], [[IF_THEN]] ]
 ; CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_LIST]], %struct.List* [[CURR_0]], i32 0, i32 1
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[VAL]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VAL1]], align 4
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP3]], [[TMP2]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[IF_THEN]], label [[FOR_COND_IF_ELSE6_CRIT_EDGE:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    [[CURR_04]] = phi %struct.List* [ [[TMP0]], [[IF_THEN_LR_PH]] ], [ [[CURR_0]], [[FOR_COND:%.*]] ]
@@ -119,35 +118,32 @@
 
 define i32 @test2(i32* %l) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:  entry:
+; CHECK-NEXT:  [[ENTRY:.*]]:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[L:%.*]], align 4
 ; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp eq i32 [[TMP0]], 0
-; CHECK-NEXT:    br i1 [[TOBOOL2]], label [[CLEANUP:%.*]], label [[DO_COND_LR_PH:%.*]]
-; CHECK:       do.cond.lr.ph:
-; CHECK-NEXT:    br label [[DO_COND:%.*]]
-; CHECK:       do.body:
-; CHECK-NEXT:    [[A_0:%.*]] = phi i32 [ [[REM:%.*]], [[DO_COND]] ]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[L]], align 4
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[TOBOOL]], label [[DO_BODY_CLEANUP_CRIT_EDGE:%.*]], label [[DO_COND]]
-; CHECK:       do.body.cleanup_crit_edge:
-; CHECK-NEXT:    [[SPLIT:%.*]] = phi i32 [ [[A_0]], [[DO_BODY:%.*]] ]
-; CHECK-NEXT:    br label [[CLEANUP]]
-; CHECK:       cleanup:
-; CHECK-NEXT:    [[A_0_LCSSA:%.*]] = phi i32 [ [[SPLIT]], [[DO_BODY_CLEANUP_CRIT_EDGE]] ], [ 100, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    br i1 [[TOBOOL2]], label %[[CLEANUP:.*]], label %[[DO_COND_LR_PH:.*]]
+; CHECK:       [[DO_COND_LR_PH]]:
+; CHECK-NEXT:    br label %[[DO_COND:.*]]
+; CHECK:       [[DO_BODY:.*]]:
+; CHECK-NEXT:    [[A_0:%.*]] = phi i32 [ [[REM:%.*]], %[[DO_COND]] ]
+; CHECK-NEXT:    br i1 [[TOBOOL2]], label %[[DO_BODY_CLEANUP_CRIT_EDGE:.*]], label %[[DO_COND]]
+; CHECK:       [[DO_BODY_CLEANUP_CRIT_EDGE]]:
+; CHECK-NEXT:    [[SPLIT:%.*]] = phi i32 [ [[A_0]], %[[DO_BODY]] ]
+; CHECK-NEXT:    br label %[[CLEANUP]]
+; CHECK:       [[CLEANUP]]:
+; CHECK-NEXT:    [[A_0_LCSSA:%.*]] = phi i32 [ [[SPLIT]], %[[DO_BODY_CLEANUP_CRIT_EDGE]] ], [ 100, %[[ENTRY]] ]
 ; CHECK-NEXT:    store i32 10, i32* [[L]], align 4
-; CHECK-NEXT:    br label [[CLEANUP2:%.*]]
-; CHECK:       do.cond:
-; CHECK-NEXT:    [[TMP2:%.*]] = phi i32 [ [[TMP0]], [[DO_COND_LR_PH]] ], [ [[TMP1]], [[DO_BODY]] ]
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP2]], 13
+; CHECK-NEXT:    br label %[[CLEANUP2:.*]]
+; CHECK:       [[DO_COND]]:
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0]], 13
 ; CHECK-NEXT:    [[REM]] = srem i32 [[MUL]], 27
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[L]], align 4
 ; CHECK-NEXT:    [[TOBOOL1:%.*]] = icmp eq i32 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[TOBOOL1]], label [[CLEANUP2_LOOPEXIT:%.*]], label [[DO_BODY]]
-; CHECK:       cleanup2.loopexit:
-; CHECK-NEXT:    br label [[CLEANUP2]]
-; CHECK:       cleanup2:
-; CHECK-NEXT:    [[RETVAL_2:%.*]] = phi i32 [ [[A_0_LCSSA]], [[CLEANUP]] ], [ 0, [[CLEANUP2_LOOPEXIT]] ]
+; CHECK-NEXT:    br i1 [[TOBOOL1]], label %[[CLEANUP2_LOOPEXIT:.*]], label %[[DO_BODY]]
+; CHECK:       [[CLEANUP2_LOOPEXIT]]:
+; CHECK-NEXT:    br label %[[CLEANUP2]]
+; CHECK:       [[CLEANUP2]]:
+; CHECK-NEXT:    [[RETVAL_2:%.*]] = phi i32 [ [[A_0_LCSSA]], %[[CLEANUP]] ], [ 0, %[[CLEANUP2_LOOPEXIT]] ]
 ; CHECK-NEXT:    ret i32 [[RETVAL_2]]
 ;
 entry:
diff --git a/llvm/unittests/Transforms/Utils/LoopRotationUtilsTest.cpp b/llvm/unittests/Transforms/Utils/LoopRotationUtilsTest.cpp
--- a/llvm/unittests/Transforms/Utils/LoopRotationUtilsTest.cpp
+++ b/llvm/unittests/Transforms/Utils/LoopRotationUtilsTest.cpp
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Transforms/Utils/LoopRotationUtils.h"
+#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/LoopInfo.h"
@@ -80,15 +81,14 @@
   TargetTransformInfo TTI(M->getDataLayout());
   TargetLibraryInfoImpl TLII;
   TargetLibraryInfo TLI(TLII);
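+  // An AAResults with no AA implementations registered answers queries
+  // conservatively, which should be sufficient for these rotation tests.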
+  AAResults AA(TLI);
   ScalarEvolution SE(*F, TLI, AC, DT, LI);
   SimplifyQuery SQ(M->getDataLayout());
 
   Loop *L = *LI.begin();
 
-  bool ret = LoopRotation(L, &LI, &TTI,
-                          &AC, &DT,
-                          &SE, nullptr,
-                          SQ, true, -1, false);
+  bool ret = LoopRotation(L, &LI, &TTI, &AA, &AC, &DT, &SE, nullptr, SQ, true,
+                          -1, false);
   EXPECT_TRUE(ret);
 }
 
@@ -151,15 +151,14 @@
   TargetTransformInfo TTI(M->getDataLayout());
   TargetLibraryInfoImpl TLII;
   TargetLibraryInfo TLI(TLII);
+  AAResults AA(TLI);
   ScalarEvolution SE(*F, TLI, AC, DT, LI);
   SimplifyQuery SQ(M->getDataLayout());
 
   Loop *L = *LI.begin();
 
-  bool ret = LoopRotation(L, &LI, &TTI,
-                          &AC, &DT,
-                          &SE, nullptr,
-                          SQ, true, -1, false);
+  bool ret = LoopRotation(L, &LI, &TTI, &AA, &AC, &DT, &SE, nullptr, SQ, true,
+                          -1, false);
   /// LoopRotation should properly report "true" as we still perform the first rotation
   /// so we do change the IR.
   EXPECT_TRUE(ret);