diff --git a/llvm/test/Analysis/ValueTracking/known-non-zero-range.ll b/llvm/test/Analysis/ValueTracking/known-non-zero-range.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Analysis/ValueTracking/known-non-zero-range.ll
@@ -0,0 +1,211 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=instcombine < %s -S | FileCheck %s
+
+declare void @llvm.assume(i1)
+declare i8 @llvm.abs.i8(i8, i1)
+declare void @use1(i1)
+
+define i1 @check_ucmp_range(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ucmp_range(
+; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X:%.*]], -11
+; CHECK-NEXT:    [[NE:%.*]] = icmp ult i8 [[TMP1]], 19
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+;
+  %ub = icmp ult i8 %x, 30
+  %lb = icmp ugt i8 %x, 10
+  %ne = and i1 %ub, %lb
+  br i1 %ne, label %true, label %false
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false); it should in fact be foldable to true. Maybe an
+  ;; `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; Extra use so that the branch doesn't get folded into a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+}
+
+define i1 @check_ucmp_range_from0(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ucmp_range_from0(
+; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X:%.*]], -1
+; CHECK-NEXT:    [[NE:%.*]] = icmp ult i8 [[TMP1]], 14
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+;
+  %ub = icmp ult i8 %x, 15
+  %lb = icmp ugt i8 %x, 0
+  %ne = and i1 %ub, %lb
+  br i1 %ne, label %true, label %false
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false); it should in fact be foldable to true. Maybe an
+  ;; `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; Extra use so that the branch doesn't get folded into a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+}
+
+define i1 @check_ucmp_range_from0_assumed(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ucmp_range_from0_assumed(
+; CHECK-NEXT:    [[UB:%.*]] = add i8 [[X:%.*]], -1
+; CHECK-NEXT:    [[NE:%.*]] = icmp ult i8 [[UB]], 14
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ub = add i8 %x, -1
+  %ne = icmp ult i8 %ub, 14
+  call void @llvm.assume(i1 %ne)
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+
+define i1 @check_ucmp_range_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ucmp_range_and(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], 120
+; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i8 [[X1]], -20
+; CHECK-NEXT:    [[NE:%.*]] = icmp ult i8 [[TMP1]], 16
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+;
+  %x1 = and i8 %x, 123
+  %ub = icmp ult i8 %x1, 36
+  %lb = icmp ugt i8 %x1, 19
+  %ne = and i1 %ub, %lb
+  br i1 %ne, label %true, label %false
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false); it should in fact be foldable to true. Maybe an
+  ;; `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; Extra use so that the branch doesn't get folded into a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+}
+
+define i1 @check_ucmp_range_from0_and_abs(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ucmp_range_from0_and_abs(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i8 [[X1]], -1
+; CHECK-NEXT:    [[NE:%.*]] = icmp ult i8 [[TMP1]], 14
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x1 = and i8 %x, 123
+  %x2 = call i8 @llvm.abs.i8(i8 %x1, i1 true)
+  %ub = icmp ult i8 %x2, 15
+  %lb = icmp ugt i8 %x2, 0
+  %ne = and i1 %ub, %lb
+  call void @llvm.assume(i1 %ne)
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_cmp_range_fail(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_cmp_range_fail(
+; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X:%.*]], 3
+; CHECK-NEXT:    [[NE:%.*]] = icmp ult i8 [[TMP1]], 18
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ub = icmp slt i8 %x, 15
+  %lb = icmp sgt i8 %x, -4
+  %ne = and i1 %ub, %lb
+  call void @llvm.assume(i1 %ne)
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_ucmp_range_inv(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ucmp_range_inv(
+; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X:%.*]], 14
+; CHECK-NEXT:    [[NE:%.*]] = icmp ult i8 [[TMP1]], 29
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       false:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       true:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+;
+  %ub = icmp slt i8 %x, 15
+  %lb = icmp sgt i8 %x, -15
+  %ne = and i1 %ub, %lb
+  br i1 %ne, label %true, label %false
+false:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+true:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false); it should in fact be foldable to true. Maybe an
+  ;; `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; Extra use so that the branch doesn't get folded into a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+}