Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -67,6 +67,11 @@
              "rather than promotion."),
     cl::Hidden);
 
+static cl::opt<bool> JumpIsExpensiveOverride(
+    "x86-jump-is-expensive", cl::init(true),
+    cl::desc("Do not split complex comparison logic into extra branches."),
+    cl::Hidden);
+
 // Forward declarations.
 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                        SDValue V2);
@@ -106,6 +111,11 @@
       addBypassSlowDiv(64, 16);
   }
 
+  // Don't undo bitwise combinations of comparisons. These are created by the
+  // IR optimizer or by the program itself, both of which know more about the
+  // predictability of the comparisons than we do here.
+  setJumpIsExpensive(JumpIsExpensiveOverride);
+
   if (Subtarget->isTargetKnownWindowsMSVC()) {
     // Setup Windows compiler runtime calls.
     setLibcallName(RTLIB::SDIV_I64, "_alldiv");
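For context, the kind of IR this change is meant to preserve looks roughly like the sketch below. It is an illustrative example only, with invented function and value names, not code from this patch; with the flag at its default of true the or'd compare is kept and lowered as setcc/or feeding a single branch, while -x86-jump-is-expensive=0 restores the old behavior of splitting it into two branches.

; Sketch (hypothetical example): a bitwise 'or' of two comparisons feeding
; one conditional branch, which the backend should no longer split.
define i32 @combined_cmp(i32 %a, i32 %b) nounwind {
entry:
  %c1 = icmp eq i32 %a, 0
  %c2 = icmp slt i32 %b, 5
  %c = or i1 %c1, %c2
  br i1 %c, label %then, label %else

then:
  ret i32 1

else:
  ret i32 0
}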
Index: test/CodeGen/X86/2008-02-18-TailMergingBug.ll
===================================================================
--- test/CodeGen/X86/2008-02-18-TailMergingBug.ll
+++ test/CodeGen/X86/2008-02-18-TailMergingBug.ll
@@ -1,5 +1,5 @@
 ; REQUIRES: asserts
-; RUN: llc < %s -march=x86 -mcpu=yonah -stats 2>&1 | grep "Number of block tails merged" | grep 16
+; RUN: llc < %s -march=x86 -mcpu=yonah -x86-jump-is-expensive=0 -stats 2>&1 | grep "Number of block tails merged" | grep 16
 ; PR1909
 
 @.str = internal constant [48 x i8] c"transformed bounds: (%.2f, %.2f), (%.2f, %.2f)\0A\00"		; <[48 x i8]*> [#uses=1]
Index: test/CodeGen/X86/MachineBranchProb.ll
===================================================================
--- test/CodeGen/X86/MachineBranchProb.ll
+++ test/CodeGen/X86/MachineBranchProb.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -print-machineinstrs=expand-isel-pseudos -o /dev/null 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -print-machineinstrs=expand-isel-pseudos -x86-jump-is-expensive=0 -o /dev/null 2>&1 | FileCheck %s
 
 ;; Make sure a transformation in SelectionDAGBuilder that converts "or + br" to
 ;; two branches correctly updates the branch probability.
Index: test/CodeGen/X86/cmov.ll
===================================================================
--- test/CodeGen/X86/cmov.ll
+++ test/CodeGen/X86/cmov.ll
@@ -91,7 +91,7 @@
 ; CHECK: g_100
 ; CHECK: testb
 ; CHECK-NOT: xor
-; CHECK: setne
+; CHECK: sete
 ; CHECK: testb
 
 func_4.exit.i:                                    ; preds = %bb.i.i.i, %entry
Index: test/CodeGen/X86/or-branch.ll
===================================================================
--- test/CodeGen/X86/or-branch.ll
+++ test/CodeGen/X86/or-branch.ll
@@ -1,6 +1,17 @@
-; RUN: llc < %s -march=x86  | not grep set
+; RUN: llc < %s -march=x86  | FileCheck %s
+
+; Don't break a compound comparison into multiple branches.
+; The source program or the IR optimizer created this pattern,
+; and the backend should not undo it without knowing how
+; predictable the resulting branches are.
 
 define void @foo(i32 %X, i32 %Y, i32 %Z) nounwind {
+; CHECK-LABEL: foo:
+; CHECK:         cmpl $0, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    cmpl $5, {{[0-9]+}}(%esp)
+; CHECK-NEXT:    setl %cl
+; CHECK-NEXT:    orb %al, %cl
 entry:
 	%tmp = tail call i32 (...) @bar( )		; <i32> [#uses=0]
 	%tmp.upgrd.1 = icmp eq i32 %X, 0		; <i1> [#uses=1]
Index: test/CodeGen/X86/remat-invalid-liveness.ll
===================================================================
--- test/CodeGen/X86/remat-invalid-liveness.ll
+++ test/CodeGen/X86/remat-invalid-liveness.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -mcpu=core2 -o - | FileCheck %s
+; RUN: llc %s -mcpu=core2 -x86-jump-is-expensive=0 -o - | FileCheck %s
 ; This test was failing while tracking the liveness in the register scavenger
 ; during the branching folding pass. The allocation of the subregisters was
 ; incorrect.