Index: llvm/include/llvm/Analysis/ScalarEvolution.h
===================================================================
--- llvm/include/llvm/Analysis/ScalarEvolution.h
+++ llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -803,6 +803,13 @@
     /// Cache for \c loopHasNoAbnormalExits.
     DenseMap<const Loop *, bool> LoopHasNoAbnormalExits;
 
+    /// Cache for \c loopHasNoSideEffects.
+    DenseMap<const Loop *, bool> LoopHasNoSideEffects;
+
+    /// Returns true if \p L contains no instruction with side effects other
+    /// than simple (non-volatile, non-atomic) stores.
+    bool loopHasNoSideEffects(const Loop *L);
+
     /// Returns true if \p L contains no instruction that can abnormally exit
     /// the loop (i.e. via throwing an exception, by terminating the thread
     /// cleanly or by infinite looping in a called function).  Strictly
Index: llvm/lib/Analysis/ScalarEvolution.cpp
===================================================================
--- llvm/lib/Analysis/ScalarEvolution.cpp
+++ llvm/lib/Analysis/ScalarEvolution.cpp
@@ -4947,6 +4947,29 @@
   return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
 }
 
+bool ScalarEvolution::loopHasNoSideEffects(const Loop *L) {
+  auto Itr = LoopHasNoSideEffects.find(L);
+  if (Itr == LoopHasNoSideEffects.end()) {
+    auto NoSideEffectsInBB = [&](BasicBlock *BB) {
+      return all_of(*BB, [](Instruction &I) {
+        // Non-atomic, non-volatile stores are ok.
+        if (auto *SI = dyn_cast<StoreInst>(&I)) {
+          return SI->isSimple();
+        } else {
+          return !I.mayHaveSideEffects();
+        }
+      });
+    };
+
+    auto InsertPair = LoopHasNoSideEffects.insert(
+        {L, all_of(L->getBlocks(), NoSideEffectsInBB)});
+    assert(InsertPair.second && "We just checked!");
+    Itr = InsertPair.first;
+  }
+
+  return Itr->second;
+}
+
 bool ScalarEvolution::loopHasNoAbnormalExits(const Loop *L) {
   auto Itr = LoopHasNoAbnormalExits.find(L);
   if (Itr == LoopHasNoAbnormalExits.end()) {
@@ -5534,6 +5557,7 @@
     forgetLoop(I);
 
   LoopHasNoAbnormalExits.erase(L);
+  LoopHasNoSideEffects.erase(L);
 }
 
 void ScalarEvolution::forgetValue(Value *V) {
@@ -8701,8 +8725,21 @@
 
   const SCEV *Stride = IV->getStepRecurrence(*this);
 
+  bool PositiveStride = isKnownPositive(Stride);
+
   // Avoid negative or zero stride values
-  if (!isKnownPositive(Stride))
+  // We can assume the stride to be positive when NoWrap is true and the loop
+  // has no abnormal exits and no side effects.
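+  //
+  // A sketch of the kind of loop this is meant to cover:
+  //
+  //   i = start;
+  //   do {
+  //     A[i] = i;   // simple store; no other side effects in the body
+  //     i += s;     // nsw/nuw increment, sign of s unknown
+  //   } while (i < n);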
+  if (!PositiveStride && (!NoWrap || !loopHasNoAbnormalExits(L) ||
+                          !loopHasNoSideEffects(L)))
     return getCouldNotCompute();
 
   // Avoid proven overflow cases: this will ensure that the backedge taken count
@@ -8716,8 +8753,13 @@
                                       : ICmpInst::ICMP_ULT;
   const SCEV *Start = IV->getStart();
   const SCEV *End = RHS;
-  if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
+  if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) {
+    // Without a loop entry guard this could be a single-trip do-while loop,
+    // so the stride cannot be assumed to be positive; bail out.
+    if (!PositiveStride)
+      return getCouldNotCompute();
     End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
+  }
 
   const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
 
Index: llvm/test/Analysis/ScalarEvolution/trip-count-assumed-positive-stride.ll
===================================================================
--- llvm/test/Analysis/ScalarEvolution/trip-count-assumed-positive-stride.ll
+++ llvm/test/Analysis/ScalarEvolution/trip-count-assumed-positive-stride.ll
@@ -0,0 +1,80 @@
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
+
+; ScalarEvolution should be able to compute the trip count by assuming the
+; stride is positive, given the nsw flag on the IV and the presence of a loop
+; entry guard.
+
+; CHECK: Determining loop execution counts for: @foo1
+; CHECK: backedge-taken count is ((-1 + %n) /u %s)
+
+target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
+
+; Function Attrs: norecurse nounwind
+define void @foo1(i32* nocapture %A, i32 %n, i32 %s) #0 {
+entry:
+  %cmp4 = icmp sgt i32 %n, 0
+  br i1 %cmp4, label %for.body, label %for.end
+
+for.body:                                         ; preds = %entry, %for.body
+  %i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
+  %0 = load i32, i32* %arrayidx, align 4
+  %inc = add nsw i32 %0, 1
+  store i32 %inc, i32* %arrayidx, align 4
+  %add = add nsw i32 %i.05, %s
+  %cmp = icmp slt i32 %add, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+
+; Without the loop entry guard, the trip count should be unpredictable.
+; CHECK: Determining loop execution counts for: @foo2
+; CHECK: Unpredictable backedge-taken count
+
+; Function Attrs: norecurse nounwind
+define void @foo2(i32* nocapture %A, i32 %n, i32 %s) #0 {
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
+  %0 = load i32, i32* %arrayidx, align 4
+  %inc = add nsw i32 %0, 1
+  store i32 %inc, i32* %arrayidx, align 4
+  %add = add nsw i32 %i.05, %s
+  %cmp = icmp slt i32 %add, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
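+; A sketch of one more case: with a volatile store in the loop body the stride
+; should no longer be assumed positive (the loop has a side effect), so the
+; trip count is expected to stay unpredictable even with the loop entry guard.
+; CHECK: Determining loop execution counts for: @foo3
+; CHECK: Unpredictable backedge-taken count
+
+; Function Attrs: norecurse nounwind
+define void @foo3(i32* nocapture %A, i32 %n, i32 %s) #0 {
+entry:
+  %cmp4 = icmp sgt i32 %n, 0
+  br i1 %cmp4, label %for.body, label %for.end
+
+for.body:                                         ; preds = %entry, %for.body
+  %i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
+  %0 = load i32, i32* %arrayidx, align 4
+  %inc = add nsw i32 %0, 1
+  store volatile i32 %inc, i32* %arrayidx, align 4
+  %add = add nsw i32 %i.05, %s
+  %cmp = icmp slt i32 %add, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}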