Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1061,12 +1061,31 @@
       !ICmpInst::isEquality(EqPred))
     return nullptr;
 
+  ICmpInst::Predicate UnsignedPred;
+
+  Value *A, *B;
+  if (match(UnsignedICmp,
+            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
+      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
+      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
+    if (UnsignedICmp->getOperand(0) != ZeroCmpOp)
+      UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
+
+    // Given  ZeroCmpOp = (A + B)
+    //   ZeroCmpOp <= A && ZeroCmpOp != 0  -->  (0-B) <  A
+    //   ZeroCmpOp >  A || ZeroCmpOp == 0  -->  (0-B) >= A
+    if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
+        IsAnd)
+      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
+    if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
+        !IsAnd)
+      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
+  }
+
   Value *Base, *Offset;
   if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset))))
     return nullptr;
 
-  ICmpInst::Predicate UnsignedPred;
-
   // ZeroCmpOp <  Base && ZeroCmpOp != 0  --> Base >  Offset  iff Offset != 0
   // ZeroCmpOp >= Base || ZeroCmpOp == 0  --> Base <= Offset  iff Offset != 0
   if (match(UnsignedICmp,
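For illustration, the new clause collapses the add-based underflow check exercised by @t0 in the test file below into a single unsigned compare. This is a minimal sketch; the @sketch function name and the post-fold value names are chosen for readability rather than matching InstCombine's unnamed output (the CHECK lines below show the actual emitted form):

define i1 @sketch(i8 %base, i8 %offset) {
  ; before: "(%base + %offset) != 0 && (%base + %offset) u<= %base"
  %adjusted = add i8 %base, %offset
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ule i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; after instcombine, the whole check becomes "(0 - %offset) u< %base"
define i1 @sketch(i8 %base, i8 %offset) {
  %neg = sub i8 0, %offset
  %r = icmp ult i8 %neg, %base
  ret i1 %r
}
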
Index: llvm/trunk/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll
+++ llvm/trunk/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll
@@ -10,10 +10,9 @@
 ; CHECK-LABEL: @t0(
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %adjusted = add i8 %base, %offset
   call void @use8(i8 %adjusted)
@@ -30,9 +29,9 @@
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
 ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
 ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %adjusted = add i8 %base, %offset
   call void @use8(i8 %adjusted)
@@ -46,11 +45,11 @@
 ; CHECK-LABEL: @t2_oneuse1(
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]
 ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %adjusted = add i8 %base, %offset
   call void @use8(i8 %adjusted)
@@ -85,10 +84,9 @@
 ; CHECK-LABEL: @t4_commutativity0(
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %adjusted = add i8 %base, %offset
   call void @use8(i8 %adjusted)
@@ -101,10 +99,9 @@
 ; CHECK-LABEL: @t5_commutativity1(
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %adjusted = add i8 %base, %offset
   call void @use8(i8 %adjusted)
@@ -117,10 +114,9 @@
 ; CHECK-LABEL: @t6_commutativity3(
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %adjusted = add i8 %base, %offset
   call void @use8(i8 %adjusted)
@@ -152,10 +148,9 @@
 ; CHECK-LABEL: @t8(
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[OFFSET]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %adjusted = add i8 %base, %offset
   call void @use8(i8 %adjusted)