Index: lib/Analysis/ValueTracking.cpp
===================================================================
--- lib/Analysis/ValueTracking.cpp
+++ lib/Analysis/ValueTracking.cpp
@@ -4001,13 +4001,11 @@
 
     if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
       // The sign bit is set in both cases: this MUST overflow.
-      // Create a simple add instruction, and insert it into the struct.
       return OverflowResult::AlwaysOverflows;
     }
 
     if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
       // The sign bit is clear in both cases: this CANNOT overflow.
-      // Create a simple add instruction, and insert it into the struct.
       return OverflowResult::NeverOverflows;
     }
   }
@@ -4124,11 +4122,18 @@
                                                    AssumptionCache *AC,
                                                    const Instruction *CxtI,
                                                    const DominatorTree *DT) {
-  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
-  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
-  if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
-    return OverflowResult::NeverOverflows;
+  if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
+    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
+
+    // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
+    if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
+      return OverflowResult::NeverOverflows;
+
+    // If the LHS is non-negative and the RHS is negative, we always wrap.
+    if (LHSKnown.isNonNegative() && RHSKnown.isNegative())
+      return OverflowResult::AlwaysOverflows;
+  }
 
   return OverflowResult::MayOverflow;
 }
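
The two known-bits cases above have a short justification for an unsigned
subtraction LHS - RHS: a set sign bit on LHS together with a clear sign bit on
RHS means LHS >= 2^(n-1) > RHS, so the subtraction can never wrap, while the
mirrored case forces LHS < RHS, so it always wraps. A minimal standalone sketch
of that argument for the 8-bit case (plain C++, not LLVM code; the names and
the exhaustive loop are illustrative only):

#include <cassert>

int main() {
  // "Negative"/"non-negative" stand for a known-set/known-clear sign bit,
  // mirroring the KnownBits reasoning in computeOverflowForUnsignedSub.
  for (unsigned LHS = 0; LHS <= 0xFF; ++LHS) {
    for (unsigned RHS = 0; RHS <= 0xFF; ++RHS) {
      bool LHSNeg = (LHS & 0x80) != 0;
      bool RHSNeg = (RHS & 0x80) != 0;
      bool Wraps = LHS < RHS; // unsigned LHS - RHS wraps exactly when LHS < RHS
      if (LHSNeg && !RHSNeg)
        assert(!Wraps); // NeverOverflows
      if (!LHSNeg && RHSNeg)
        assert(Wraps); // AlwaysOverflows
    }
  }
}
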
Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2056,6 +2056,90 @@
     break;
   }
 
+  case Intrinsic::uadd_sat:
+  case Intrinsic::sadd_sat:
+    if (isa<Constant>(II->getArgOperand(0)) &&
+        !isa<Constant>(II->getArgOperand(1))) {
+      // Canonicalize constants into the RHS.
+      Value *LHS = II->getArgOperand(0);
+      II->setArgOperand(0, II->getArgOperand(1));
+      II->setArgOperand(1, LHS);
+      return II;
+    }
+    LLVM_FALLTHROUGH;
+  case Intrinsic::usub_sat:
+  case Intrinsic::ssub_sat: {
+    Value *Arg0 = II->getArgOperand(0);
+    Value *Arg1 = II->getArgOperand(1);
+    Intrinsic::ID IID = II->getIntrinsicID();
+
+    // Make use of known overflow information.
+    OverflowResult OR;
+    switch (IID) {
+    default:
+      llvm_unreachable("Unexpected intrinsic!");
+    case Intrinsic::uadd_sat:
+      OR = computeOverflowForUnsignedAdd(Arg0, Arg1, II);
+      if (OR == OverflowResult::NeverOverflows)
+        return BinaryOperator::CreateNUWAdd(Arg0, Arg1);
+      if (OR == OverflowResult::AlwaysOverflows)
+        return replaceInstUsesWith(*II,
+                                   ConstantInt::getAllOnesValue(II->getType()));
+      break;
+    case Intrinsic::usub_sat:
+      OR = computeOverflowForUnsignedSub(Arg0, Arg1, II);
+      if (OR == OverflowResult::NeverOverflows)
+        return BinaryOperator::CreateNUWSub(Arg0, Arg1);
+      if (OR == OverflowResult::AlwaysOverflows)
+        return replaceInstUsesWith(*II,
+                                   ConstantInt::getNullValue(II->getType()));
+      break;
+    case Intrinsic::sadd_sat:
+      if (willNotOverflowSignedAdd(Arg0, Arg1, *II))
+        return BinaryOperator::CreateNSWAdd(Arg0, Arg1);
+      break;
+    case Intrinsic::ssub_sat:
+      if (willNotOverflowSignedSub(Arg0, Arg1, *II))
+        return BinaryOperator::CreateNSWSub(Arg0, Arg1);
+      break;
+    }
+
+    // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
+    // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
+    // if Val and Val2 have the same sign
+    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
+      Value *X;
+      const APInt *Val, *Val2;
+      APInt NewVal;
+      bool IsUnsigned =
+          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
+      if (Other->getIntrinsicID() == II->getIntrinsicID() &&
+          match(Arg1, m_APInt(Val)) &&
+          match(Other->getArgOperand(0), m_Value(X)) &&
+          match(Other->getArgOperand(1), m_APInt(Val2))) {
+        if (IsUnsigned)
+          NewVal = Val->uadd_sat(*Val2);
+        else if (Val->isNonNegative() == Val2->isNonNegative()) {
+          bool Overflow;
+          NewVal = Val->sadd_ov(*Val2, Overflow);
+          if (Overflow) {
+            // Together the two constants may move X past SignedMaxValue (or
+            // SignedMinValue) without the final result saturating.
+            break;
+          }
+        } else {
+          // Cannot fold saturated addition with different signs.
+          break;
+        }
+
+        return replaceInstUsesWith(
+            *II, Builder.CreateBinaryIntrinsic(
+                     IID, X, ConstantInt::get(II->getType(), NewVal)));
+      }
+    }
+    break;
+  }
+
   case Intrinsic::minnum:
   case Intrinsic::maxnum:
   case Intrinsic::minimum:
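
The constant-merging fold above relies on two facts: for the unsigned
intrinsics the two constants can simply be combined with a saturating add,
while for the signed ones the fold is only safe when the constants have the
same sign and their exact sum is representable. The guarded branches can be
made concrete with a small standalone model of the i8 case (plain C++17;
sadd_sat8 is an illustrative stand-in for the intrinsic, not LLVM API):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Illustrative stand-in for @llvm.sadd.sat.i8; not LLVM API.
static int8_t sadd_sat8(int8_t A, int8_t B) {
  return int8_t(std::clamp(int(A) + int(B), -128, 127));
}

int main() {
  // Same-sign constants whose exact sum fits in i8: the nested form equals a
  // single saturating add of the summed constant, which is the fold performed.
  for (int X = -128; X <= 127; ++X)
    assert(sadd_sat8(sadd_sat8(int8_t(X), 10), 20) == sadd_sat8(int8_t(X), 30));

  // Same-sign constants whose sum overflows i8 (the `Overflow` break): no
  // in-range replacement constant reproduces the nested result.
  assert(sadd_sat8(sadd_sat8(-100, 100), 100) == 100); // nested result
  assert(sadd_sat8(-100, 127) == 27);                  // best in-range "fold"

  // Different signs (the final break): folding to the summed constant 0 would
  // lose the intermediate saturation.
  assert(sadd_sat8(sadd_sat8(100, 100), -100) == 27);
  assert(sadd_sat8(100, 0) == 100);
}
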
Index: test/Transforms/InstCombine/saturating-add-sub.ll
===================================================================
--- test/Transforms/InstCombine/saturating-add-sub.ll
+++ test/Transforms/InstCombine/saturating-add-sub.ll
@@ -13,9 +13,8 @@
 ; Can combine uadds with constant operands.
 define i8 @test_scalar_uadd_combine(i8 %a) {
 ; CHECK-LABEL: @test_scalar_uadd_combine(
-; CHECK-NEXT:    [[X1:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 10)
-; CHECK-NEXT:    [[X2:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[X1]], i8 20)
-; CHECK-NEXT:    ret i8 [[X2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %x1 = call i8 @llvm.uadd.sat.i8(i8 %a, i8 10)
   %x2 = call i8 @llvm.uadd.sat.i8(i8 %x1, i8 20)
@@ -24,9 +23,8 @@
 
 define <2 x i8> @test_vector_uadd_combine(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_uadd_combine(
-; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
-; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 20, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[X2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
+; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
 ;
   %x1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
   %x2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 20, i8 20>)
@@ -48,9 +46,7 @@
 ; Can combine uadds even if they overflow.
 define i8 @test_scalar_uadd_overflow(i8 %a) {
 ; CHECK-LABEL: @test_scalar_uadd_overflow(
-; CHECK-NEXT:    [[Y1:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 100)
-; CHECK-NEXT:    [[Y2:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[Y1]], i8 -56)
-; CHECK-NEXT:    ret i8 [[Y2]]
+; CHECK-NEXT:    ret i8 -1
 ;
   %y1 = call i8 @llvm.uadd.sat.i8(i8 %a, i8 100)
   %y2 = call i8 @llvm.uadd.sat.i8(i8 %y1, i8 200)
@@ -59,9 +55,7 @@
 
 define <2 x i8> @test_vector_uadd_overflow(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_uadd_overflow(
-; CHECK-NEXT:    [[Y1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 100, i8 100>)
-; CHECK-NEXT:    [[Y2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[Y1]], <2 x i8> <i8 -56, i8 -56>)
-; CHECK-NEXT:    ret <2 x i8> [[Y2]]
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
 ;
   %y1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 100, i8 100>)
   %y2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %y1, <2 x i8> <i8 200, i8 200>)
@@ -71,9 +65,8 @@
 ; Can combine sadds if sign matches.
 define i8 @test_scalar_sadd_both_positive(i8 %a) {
 ; CHECK-LABEL: @test_scalar_sadd_both_positive(
-; CHECK-NEXT:    [[Z1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 10)
-; CHECK-NEXT:    [[Z2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[Z1]], i8 20)
-; CHECK-NEXT:    ret i8 [[Z2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %z1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 10)
   %z2 = call i8 @llvm.sadd.sat.i8(i8 %z1, i8 20)
@@ -82,9 +75,8 @@
 
 define <2 x i8> @test_vector_sadd_both_positive(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_sadd_both_positive(
-; CHECK-NEXT:    [[Z1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
-; CHECK-NEXT:    [[Z2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[Z1]], <2 x i8> <i8 20, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[Z2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
+; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
 ;
   %z1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
   %z2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %z1, <2 x i8> <i8 20, i8 20>)
@@ -93,9 +85,8 @@
 
 define i8 @test_scalar_sadd_both_negative(i8 %a) {
 ; CHECK-LABEL: @test_scalar_sadd_both_negative(
-; CHECK-NEXT:    [[U1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
-; CHECK-NEXT:    [[U2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[U1]], i8 -20)
-; CHECK-NEXT:    ret i8 [[U2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %u1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 -10)
   %u2 = call i8 @llvm.sadd.sat.i8(i8 %u1, i8 -20)
@@ -104,9 +95,8 @@
 
 define <2 x i8> @test_vector_sadd_both_negative(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_sadd_both_negative(
-; CHECK-NEXT:    [[U1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 -10>)
-; CHECK-NEXT:    [[U2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[U1]], <2 x i8> <i8 -20, i8 -20>)
-; CHECK-NEXT:    ret <2 x i8> [[U2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
+; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
 ;
   %u1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
   %u2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
@@ -140,9 +130,7 @@
 ; neg uadd neg always overflows.
 define i8 @test_scalar_uadd_neg_neg(i8 %a) {
 ; CHECK-LABEL: @test_scalar_uadd_neg_neg(
-; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A_NEG]], i8 -10)
-; CHECK-NEXT:    ret i8 [[R]]
+; CHECK-NEXT:    ret i8 -1
 ;
   %a_neg = or i8 %a, -128
   %r = call i8 @llvm.uadd.sat.i8(i8 %a_neg, i8 -10)
@@ -151,9 +139,7 @@
 
 define <2 x i8> @test_vector_uadd_neg_neg(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_uadd_neg_neg(
-; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
-; CHECK-NEXT:    ret <2 x i8> [[R]]
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
 ;
   %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
   %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
@@ -164,7 +150,7 @@
 define i8 @test_scalar_uadd_nneg_nneg(i8 %a) {
 ; CHECK-LABEL: @test_scalar_uadd_nneg_nneg(
 ; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A_NNEG]], i8 10)
+; CHECK-NEXT:    [[R:%.*]] = add nuw i8 [[A_NNEG]], 10
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a_nneg = and i8 %a, 127
@@ -175,7 +161,7 @@
 define <2 x i8> @test_vector_uadd_nneg_nneg(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_uadd_nneg_nneg(
 ; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    [[R:%.*]] = add nuw <2 x i8> [[A_NNEG]], <i8 10, i8 20>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
@@ -210,7 +196,7 @@
 define i8 @test_scalar_sadd_neg_nneg(i8 %a) {
 ; CHECK-LABEL: @test_scalar_sadd_neg_nneg(
 ; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 10)
+; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NEG]], 10
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a_neg = or i8 %a, -128
@@ -221,7 +207,7 @@
 define <2 x i8> @test_vector_sadd_neg_nneg(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_sadd_neg_nneg(
 ; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NEG]], <i8 10, i8 20>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
@@ -233,7 +219,7 @@
 define i8 @test_scalar_sadd_nneg_neg(i8 %a) {
 ; CHECK-LABEL: @test_scalar_sadd_nneg_neg(
 ; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NNEG]], i8 -10)
+; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NNEG]], -10
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a_nneg = and i8 %a, 127
@@ -244,7 +230,7 @@
 define <2 x i8> @test_vector_sadd_nneg_neg(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_sadd_nneg_neg(
 ; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 -10, i8 -20>)
+; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NNEG]], <i8 -10, i8 -20>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
@@ -287,9 +273,8 @@
 ; Can combine usubs with constant operands.
 define i8 @test_scalar_usub_combine(i8 %a) {
 ; CHECK-LABEL: @test_scalar_usub_combine(
-; CHECK-NEXT:    [[X1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
-; CHECK-NEXT:    [[X2:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[X1]], i8 20)
-; CHECK-NEXT:    ret i8 [[X2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %x1 = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
   %x2 = call i8 @llvm.usub.sat.i8(i8 %x1, i8 20)
@@ -298,9 +283,8 @@
 
 define <2 x i8> @test_vector_usub_combine(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_usub_combine(
-; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
-; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 20, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[X2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
+; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
 ;
   %x1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
   %x2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 20, i8 20>)
@@ -322,9 +306,7 @@
 ; Can combine usubs even if they overflow.
 define i8 @test_scalar_usub_overflow(i8 %a) {
 ; CHECK-LABEL: @test_scalar_usub_overflow(
-; CHECK-NEXT:    [[Y1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 100)
-; CHECK-NEXT:    [[Y2:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y1]], i8 -56)
-; CHECK-NEXT:    ret i8 [[Y2]]
+; CHECK-NEXT:    ret i8 0
 ;
   %y1 = call i8 @llvm.usub.sat.i8(i8 %a, i8 100)
   %y2 = call i8 @llvm.usub.sat.i8(i8 %y1, i8 200)
@@ -333,9 +315,7 @@
 
 define <2 x i8> @test_vector_usub_overflow(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_usub_overflow(
-; CHECK-NEXT:    [[Y1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 100, i8 100>)
-; CHECK-NEXT:    [[Y2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[Y1]], <2 x i8> <i8 -56, i8 -56>)
-; CHECK-NEXT:    ret <2 x i8> [[Y2]]
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
 ;
   %y1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 100, i8 100>)
   %y2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %y1, <2 x i8> <i8 200, i8 200>)
@@ -345,9 +325,8 @@
 ; Can combine ssubs if sign matches.
 define i8 @test_scalar_ssub_both_positive(i8 %a) {
 ; CHECK-LABEL: @test_scalar_ssub_both_positive(
-; CHECK-NEXT:    [[Z1:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 10)
-; CHECK-NEXT:    [[Z2:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[Z1]], i8 20)
-; CHECK-NEXT:    ret i8 [[Z2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %z1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
   %z2 = call i8 @llvm.ssub.sat.i8(i8 %z1, i8 20)
@@ -356,9 +335,8 @@
 
 define <2 x i8> @test_vector_ssub_both_positive(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_ssub_both_positive(
-; CHECK-NEXT:    [[Z1:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 10>)
-; CHECK-NEXT:    [[Z2:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[Z1]], <2 x i8> <i8 20, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[Z2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
+; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
 ;
   %z1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
   %z2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %z1, <2 x i8> <i8 20, i8 20>)
@@ -367,9 +345,8 @@
 
 define i8 @test_scalar_ssub_both_negative(i8 %a) {
 ; CHECK-LABEL: @test_scalar_ssub_both_negative(
-; CHECK-NEXT:    [[U1:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 -10)
-; CHECK-NEXT:    [[U2:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[U1]], i8 -20)
-; CHECK-NEXT:    ret i8 [[U2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 -30)
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %u1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -10)
   %u2 = call i8 @llvm.ssub.sat.i8(i8 %u1, i8 -20)
@@ -378,9 +355,8 @@
 
 define <2 x i8> @test_vector_ssub_both_negative(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_ssub_both_negative(
-; CHECK-NEXT:    [[U1:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 -10>)
-; CHECK-NEXT:    [[U2:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[U1]], <2 x i8> <i8 -20, i8 -20>)
-; CHECK-NEXT:    ret <2 x i8> [[U2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
+; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
 ;
   %u1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
   %u2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
@@ -414,9 +390,7 @@
 ; nneg usub neg always overflows.
 define i8 @test_scalar_usub_nneg_neg(i8 %a) {
 ; CHECK-LABEL: @test_scalar_usub_nneg_neg(
-; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A_NNEG]], i8 -10)
-; CHECK-NEXT:    ret i8 [[R]]
+; CHECK-NEXT:    ret i8 0
 ;
   %a_nneg = and i8 %a, 127
   %r = call i8 @llvm.usub.sat.i8(i8 %a_nneg, i8 -10)
@@ -425,9 +399,7 @@
 
 define <2 x i8> @test_vector_usub_nneg_neg(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_usub_nneg_neg(
-; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 -10, i8 -20>)
-; CHECK-NEXT:    ret <2 x i8> [[R]]
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
 ;
   %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
   %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 -10, i8 -20>)
@@ -438,7 +410,7 @@
 define i8 @test_scalar_usub_neg_nneg(i8 %a) {
 ; CHECK-LABEL: @test_scalar_usub_neg_nneg(
 ; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A_NEG]], i8 10)
+; CHECK-NEXT:    [[R:%.*]] = add i8 [[A_NEG]], -10
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a_neg = or i8 %a, -128
@@ -449,7 +421,7 @@
 define <2 x i8> @test_vector_usub_neg_nneg(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_usub_neg_nneg(
 ; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[A_NEG]], <i8 -10, i8 -20>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
@@ -484,7 +456,7 @@
 define i8 @test_scalar_ssub_neg_neg(i8 %a) {
 ; CHECK-LABEL: @test_scalar_ssub_neg_neg(
 ; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A_NEG]], i8 -10)
+; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NEG]], 10
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a_neg = or i8 %a, -128
@@ -495,7 +467,7 @@
 define <2 x i8> @test_vector_ssub_neg_neg(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_ssub_neg_neg(
 ; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
+; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NEG]], <i8 10, i8 20>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
@@ -507,7 +479,7 @@
 define i8 @test_scalar_ssub_nneg_nneg(i8 %a) {
 ; CHECK-LABEL: @test_scalar_ssub_nneg_nneg(
 ; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A_NNEG]], i8 10)
+; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NNEG]], -10
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a_nneg = and i8 %a, 127
@@ -518,7 +490,7 @@
 define <2 x i8> @test_vector_ssub_nneg_nneg(<2 x i8> %a) {
 ; CHECK-LABEL: @test_vector_ssub_nneg_nneg(
 ; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 10, i8 20>)
+; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NNEG]], <i8 -10, i8 -20>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
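
The updated CHECK lines fall into three groups: nested calls merged into a
single saturating call with the summed constant, calls folded to a constant
because saturation is guaranteed, and calls replaced by a plain add carrying
nuw/nsw because saturation is impossible. The last two groups can be verified
exhaustively for the 8-bit patterns used here; the sketch below is plain C++17
with illustrative helpers (uadd_sat8/sadd_sat8 model the intrinsics and are not
LLVM API):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Illustrative i8 models of the saturating intrinsics; not LLVM API.
static uint8_t uadd_sat8(uint8_t A, uint8_t B) {
  return uint8_t(std::min(int(A) + int(B), 255));
}
static int8_t sadd_sat8(int8_t A, int8_t B) {
  return int8_t(std::clamp(int(A) + int(B), -128, 127));
}

int main() {
  for (int A = 0; A <= 255; ++A) {
    // test_scalar_uadd_neg_neg: both sign bits set, so the unsigned add always
    // saturates and the call folds to -1 (0xFF).
    uint8_t ANeg = uint8_t(A) | 0x80;
    assert(uadd_sat8(ANeg, uint8_t(-10)) == 0xFF);

    // test_scalar_uadd_nneg_nneg: both sign bits clear, so the add can never
    // saturate and becomes a plain `add nuw`.
    uint8_t ANneg = uint8_t(A) & 0x7F;
    assert(uadd_sat8(ANneg, 10) == uint8_t(ANneg + 10));

    // test_scalar_sadd_neg_nneg: a known-negative LHS plus a small positive
    // constant stays in signed range, so the call becomes `add nsw`.
    assert(sadd_sat8(int8_t(ANeg), 10) == int8_t(int8_t(ANeg) + 10));
  }
}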