Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1400,7 +1400,8 @@
   case Intrinsic::cttz: {
     // If all bits below the first known one are known zero,
     // this value is constant.
-    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
+    auto *Op0 = II->getArgOperand(0);
+    IntegerType *IT = dyn_cast<IntegerType>(Op0->getType());
     // FIXME: Try to simplify vectors of integers.
     if (!IT) break;
     uint32_t BitWidth = IT->getBitWidth();
@@ -1412,13 +1413,17 @@
     if ((Mask & KnownZero) == Mask)
       return replaceInstUsesWith(CI, ConstantInt::get(IT,
                                  APInt(BitWidth, TrailingZeros)));
-
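+    // If the operand is known to be non-zero, the intrinsic's zero-input
+    // behavior is never observed, so the is_zero_undef flag can be set.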
+    if (KnownOne != 0 || isKnownNonZero(Op0, DL))
+      goto MakeCttzCtlzUndef;
     }
     break;
   case Intrinsic::ctlz: {
     // If all bits above the first known one are known zero,
     // this value is constant.
-    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
+    auto *Op0 = II->getArgOperand(0);
+    IntegerType *IT = dyn_cast<IntegerType>(Op0->getType());
     // FIXME: Try to simplify vectors of integers.
     if (!IT) break;
     uint32_t BitWidth = IT->getBitWidth();
@@ -1430,10 +1435,24 @@
     if ((Mask & KnownZero) == Mask)
       return replaceInstUsesWith(CI, ConstantInt::get(IT,
                                  APInt(BitWidth, LeadingZeros)));
-
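+    // Likewise, promote the is_zero_undef flag when the operand is non-zero.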
+    if (KnownOne != 0 || isKnownNonZero(Op0, DL))
+      goto MakeCttzCtlzUndef;
     }
     break;
-
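+  // Shared tail for the cttz/ctlz cases above: set the is_zero_undef flag to
+  // true (if it is not already) now that the operand is known to be non-zero.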
+  MakeCttzCtlzUndef: {
+    bool IsZeroUndef = false;
+    auto *Op1 = II->getArgOperand(1);
+    if (auto *Op1C = dyn_cast<ConstantInt>(Op1))
+      IsZeroUndef = Op1C->getZExtValue() != 0;
+    if (!IsZeroUndef) {
+      II->setOperand(1, ConstantInt::getAllOnesValue(Op1->getType()));
+      return II;
+    }
+    break;
+  }
   case Intrinsic::uadd_with_overflow:
   case Intrinsic::sadd_with_overflow:
   case Intrinsic::umul_with_overflow:
Index: lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2378,14 +2378,20 @@
       return &ICI;
     }
     break;
-  case Intrinsic::ctpop:
+  case Intrinsic::ctpop: {
     // popcount(A) == 0  ->  A == 0 and likewise for !=
-    if (*Op1C == 0) {
+    // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
+    bool IsZero = *Op1C == 0;
+    if (IsZero || *Op1C == Op1C->getBitWidth()) {
       Worklist.Add(II);
       ICI.setOperand(0, II->getArgOperand(0));
-      ICI.setOperand(1, ConstantInt::getNullValue(II->getType()));
+      auto *NewOp = IsZero
+        ? ConstantInt::getNullValue(II->getType())
+        : ConstantInt::getAllOnesValue(II->getType());
+      ICI.setOperand(1, NewOp);
       return &ICI;
     }
+    }
     break;
   default:
     break;
Index: test/Transforms/InstCombine/intrinsics.ll
===================================================================
--- test/Transforms/InstCombine/intrinsics.ll
+++ test/Transforms/InstCombine/intrinsics.ll
@@ -302,9 +302,12 @@
   %tz = tail call i32 @llvm.cttz.i32(i32 %a, i1 false) nounwind readnone
   %tz.cmp = icmp ne i32 %tz, 32
   store volatile i1 %tz.cmp, i1* %c
-  %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
-  %pop.cmp = icmp eq i32 %pop, 0
-  store volatile i1 %pop.cmp, i1* %c
+  %pop0 = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
+  %pop0.cmp = icmp eq i32 %pop0, 0
+  store volatile i1 %pop0.cmp, i1* %c
+  %pop1 = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
+  %pop1.cmp = icmp eq i32 %pop1, 32
+  store volatile i1 %pop1.cmp, i1* %c
   ret void
 ; CHECK: @cmp.simplify
 ; CHECK-NEXT: entry:
@@ -312,8 +315,10 @@
 ; CHECK-NEXT: store volatile i1 %lz.cmp, i1* %c
 ; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
 ; CHECK-NEXT: store volatile i1 %tz.cmp, i1* %c
-; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
-; CHECK-NEXT: store volatile i1 %pop.cmp, i1* %c
+; CHECK-NEXT: %pop0.cmp = icmp eq i32 %b, 0
+; CHECK-NEXT: store volatile i1 %pop0.cmp, i1* %c
+; CHECK-NEXT: %pop1.cmp = icmp eq i32 %b, -1
+; CHECK-NEXT: store volatile i1 %pop1.cmp, i1* %c
 }
 
 define <2 x i1> @ctlz_cmp_vec(<2 x i32> %a) {
@@ -374,6 +379,19 @@
 ; CHECK-NEXT: ret i32 undef
 }
 
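+; The or with 8 makes the operand non-zero, so the flag is promoted to true.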
+define i32 @ctlz_make_undef(i32 %a) {
+entry:
+  %or = or i32 %a, 8
+  %ctlz = tail call i32 @llvm.ctlz.i32(i32 %or, i1 false)
+  ret i32 %ctlz
+; CHECK-LABEL: @ctlz_make_undef(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %or = or i32 %a, 8
+; CHECK-NEXT: %ctlz = tail call i32 @llvm.ctlz.i32(i32 %or, i1 true)
+; CHECK-NEXT: ret i32 %ctlz
+}
+
 define i32 @cttz_undef(i32 %Value) nounwind {
   %cttz = call i32 @llvm.cttz.i32(i32 0, i1 true)
   ret i32 %cttz
@@ -382,6 +400,19 @@
 ; CHECK-NEXT: ret i32 undef
 }
 
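+; Likewise for cttz: a provably non-zero operand promotes the flag to true.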
+define i32 @cttz_make_undef(i32 %a) {
+entry:
+  %or = or i32 %a, 8
+  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 false)
+  ret i32 %cttz
+; CHECK-LABEL: @cttz_make_undef(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %or = or i32 %a, 8
+; CHECK-NEXT: %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
+; CHECK-NEXT: ret i32 %cttz
+}
+
 define i32 @ctlz_select(i32 %Value) nounwind {
   %tobool = icmp ne i32 %Value, 0
   %ctlz = call i32 @llvm.ctlz.i32(i32 %Value, i1 true)