diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11643,11 +11643,10 @@
   // This transformation is beneficial because visitBRCOND can fold
   // BRCOND(FREEZE(X)) to BRCOND(X).
 
+  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
   // Conservatively optimize integer comparisons only.
   if (PreferSetCC) {
     // Do this only when SETCC is going to be used by BRCOND.
-
-    SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
     ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
     ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
     bool Updated = false;
@@ -11683,6 +11682,34 @@
       return DAG.getFreeze(DAG.getSetCC(SDLoc(N), VT, N0, N1, Cond));
   }
 
+  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+      N0->getOpcode() == ISD::ABS) {
+    // If C == -C (i.e. C is 0 or IntMin):
+    //    ABS(X, false) eq C -> X eq C
+    //    ABS(X, false) ne C -> X ne C
+    //    ABS(X, true) eq 0 -> X eq 0
+    //    ABS(X, true) ne 0 -> X ne 0
+    //    ABS(X, true) eq IntMin -> false
+    //    ABS(X, true) ne IntMin -> true
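+    // Rationale (informal sketch): only 0 and IntMin satisfy C == -C. For
+    // i8, abs(x, false) == 0 iff x == 0 and abs(x, false) == -128 iff
+    // x == -128 (abs(-128) wraps back to -128), so the compare can look
+    // through the ABS. With int_min_poison set, ABS can never produce
+    // IntMin, so an eq/ne against IntMin folds to a constant.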
+    if (auto *C = isConstOrConstSplat(N1)) {
+      if (C->getAPIntValue() == -(C->getAPIntValue())) {
+        // If producing IntMin is poison (int_min_poison operand set), fold to a constant.
+        if (N0->getNumOperands() == 2 && C->getAPIntValue().isMinSignedValue())
+          if (auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
+            if (C1->getAPIntValue().isOne())
+              return DAG.getConstant(Cond == ISD::SETEQ ? 0 : 1, SDLoc(N), VT);
+
+        // Otherwise X eq/ne C.
+        return DAG.getSetCC(SDLoc(N), VT, N0->getOperand(0), N1, Cond);
+      }
+    }
+  }
+
   SDValue Combined = SimplifySetCC(VT, N->getOperand(0), N->getOperand(1), Cond,
                                    SDLoc(N), !PreferSetCC);
 
diff --git a/llvm/test/CodeGen/X86/abs-0-and-intmin.ll b/llvm/test/CodeGen/X86/abs-0-and-intmin.ll
--- a/llvm/test/CodeGen/X86/abs-0-and-intmin.ll
+++ b/llvm/test/CodeGen/X86/abs-0-and-intmin.ll
@@ -15,20 +15,13 @@
 define i1 @absT_eq_0_i8(i8 %x) {
 ; X86-LABEL: absT_eq_0_i8:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    sarb $7, %cl
-; X86-NEXT:    xorb %cl, %al
-; X86-NEXT:    subb %cl, %al
+; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_eq_0_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    sarb $7, %al
-; X64-NEXT:    xorb %al, %dil
-; X64-NEXT:    subb %al, %dil
+; X64-NEXT:    testb %dil, %dil
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %ax = call i8 @llvm.abs.i8(i8 %x, i1 true)
@@ -39,20 +32,13 @@
 define i1 @absF_eq_0_i8(i8 %x) {
 ; X86-LABEL: absF_eq_0_i8:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    sarb $7, %cl
-; X86-NEXT:    xorb %cl, %al
-; X86-NEXT:    subb %cl, %al
+; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_eq_0_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    sarb $7, %al
-; X64-NEXT:    xorb %al, %dil
-; X64-NEXT:    subb %al, %dil
+; X64-NEXT:    testb %dil, %dil
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %ax = call i8 @llvm.abs.i8(i8 %x, i1 false)
@@ -63,20 +49,13 @@
 define i1 @absT_ne_0_i16(i16 %x) {
 ; X86-LABEL: absT_ne_0_i16:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movswl %ax, %ecx
-; X86-NEXT:    sarl $15, %ecx
-; X86-NEXT:    xorl %ecx, %eax
-; X86-NEXT:    cmpw %cx, %ax
+; X86-NEXT:    cmpw $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_ne_0_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    negw %ax
-; X64-NEXT:    cmovsw %di, %ax
-; X64-NEXT:    testw %ax, %ax
+; X64-NEXT:    testw %di, %di
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
   %ax = call i16 @llvm.abs.i16(i16 %x, i1 true)
@@ -87,20 +66,13 @@
 define i1 @absF_ne_0_i16(i16 %x) {
 ; X86-LABEL: absF_ne_0_i16:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movswl %ax, %ecx
-; X86-NEXT:    sarl $15, %ecx
-; X86-NEXT:    xorl %ecx, %eax
-; X86-NEXT:    cmpw %cx, %ax
+; X86-NEXT:    cmpw $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_ne_0_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    negw %ax
-; X64-NEXT:    cmovsw %di, %ax
-; X64-NEXT:    testw %ax, %ax
+; X64-NEXT:    testw %di, %di
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
   %ax = call i16 @llvm.abs.i16(i16 %x, i1 false)
@@ -111,21 +83,13 @@
 define i1 @absT_eq_0_i32(i32 %x) {
 ; X86-LABEL: absT_eq_0_i32:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %eax
-; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmpl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    cmpl $-2147483648, {{[0-9]+}}(%esp) # imm = 0x80000000
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_eq_0_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    negl %eax
-; X64-NEXT:    cmovsl %edi, %eax
-; X64-NEXT:    cmpl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT:    cmpl $-2147483648, %edi # imm = 0x80000000
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %ax = call i32 @llvm.abs.i32(i32 %x, i1 true)
@@ -136,21 +100,13 @@
 define i1 @absF_eq_0_i32(i32 %x) {
 ; X86-LABEL: absF_eq_0_i32:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %eax
-; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    cmpl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    cmpl $-2147483648, {{[0-9]+}}(%esp) # imm = 0x80000000
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_eq_0_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    negl %eax
-; X64-NEXT:    cmovsl %edi, %eax
-; X64-NEXT:    cmpl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT:    cmpl $-2147483648, %edi # imm = 0x80000000
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
   %ax = call i32 @llvm.abs.i32(i32 %x, i1 false)
@@ -161,26 +117,16 @@
 define i1 @absT_ne_0_i64(i64 %x) {
 ; X86-LABEL: absT_ne_0_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    xorl %ecx, %edx
-; X86-NEXT:    subl %ecx, %edx
-; X86-NEXT:    sbbl %ecx, %eax
-; X86-NEXT:    addl $-2147483648, %eax # imm = 0x80000000
-; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_ne_0_i64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    negq %rax
-; X64-NEXT:    cmovsq %rdi, %rax
-; X64-NEXT:    movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
-; X64-NEXT:    cmpq %rcx, %rax
+; X64-NEXT:    movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
+; X64-NEXT:    cmpq %rax, %rdi
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
   %ax = call i64 @llvm.abs.i64(i64 %x, i1 true)
@@ -191,26 +137,16 @@
 define i1 @absF_ne_0_i64(i64 %x) {
 ; X86-LABEL: absF_ne_0_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    xorl %ecx, %edx
-; X86-NEXT:    subl %ecx, %edx
-; X86-NEXT:    sbbl %ecx, %eax
-; X86-NEXT:    addl $-2147483648, %eax # imm = 0x80000000
-; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_ne_0_i64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    negq %rax
-; X64-NEXT:    cmovsq %rdi, %rax
-; X64-NEXT:    movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
-; X64-NEXT:    cmpq %rcx, %rax
+; X64-NEXT:    movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
+; X64-NEXT:    cmpq %rax, %rdi
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
   %ax = call i64 @llvm.abs.i64(i64 %x, i1 false)
@@ -221,31 +157,19 @@
 define <2 x i1> @absT_eq_0_2xi8(<2 x i8> %x) {
 ; X86-LABEL: absT_eq_0_2xi8:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    sarb $7, %dl
-; X86-NEXT:    xorb %dl, %cl
-; X86-NEXT:    subb %dl, %cl
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarb $7, %dl
-; X86-NEXT:    xorb %dl, %al
-; X86-NEXT:    subb %dl, %al
+; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    sete %al
-; X86-NEXT:    testb %cl, %cl
+; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    sete %dl
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_eq_0_2xi8:
 ; X64:       # %bb.0:
 ; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    pxor %xmm2, %xmm2
-; X64-NEXT:    psubb %xmm0, %xmm2
-; X64-NEXT:    pminub %xmm0, %xmm2
-; X64-NEXT:    pcmpeqb %xmm1, %xmm2
-; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; X64-NEXT:    pcmpeqb %xmm0, %xmm1
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; X64-NEXT:    retq
   %ax = call <2 x i8> @llvm.abs.v2i8(<2 x i8> %x, i1 true)
   %cmp = icmp eq <2 x i8> %ax, <i8 0, i8 0>
@@ -256,18 +180,14 @@
 ; X86-LABEL: absT_eq_0_2xi8_fail:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    sarb $7, %dl
-; X86-NEXT:    xorb %dl, %cl
-; X86-NEXT:    subb %dl, %cl
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarb $7, %dl
-; X86-NEXT:    xorb %dl, %al
-; X86-NEXT:    subb %dl, %al
-; X86-NEXT:    sete %al
-; X86-NEXT:    cmpb $2, %cl
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    sarb $7, %cl
+; X86-NEXT:    xorb %cl, %al
+; X86-NEXT:    subb %cl, %al
+; X86-NEXT:    cmpb $2, %al
 ; X86-NEXT:    sete %dl
+; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_eq_0_2xi8_fail:
@@ -288,31 +208,19 @@
 define <2 x i1> @absF_eq_0_2xi8(<2 x i8> %x) {
 ; X86-LABEL: absF_eq_0_2xi8:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    sarb $7, %dl
-; X86-NEXT:    xorb %dl, %cl
-; X86-NEXT:    subb %dl, %cl
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarb $7, %dl
-; X86-NEXT:    xorb %dl, %al
-; X86-NEXT:    subb %dl, %al
+; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    sete %al
-; X86-NEXT:    testb %cl, %cl
+; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    sete %dl
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_eq_0_2xi8:
 ; X64:       # %bb.0:
 ; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    pxor %xmm2, %xmm2
-; X64-NEXT:    psubb %xmm0, %xmm2
-; X64-NEXT:    pminub %xmm0, %xmm2
-; X64-NEXT:    pcmpeqb %xmm1, %xmm2
-; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; X64-NEXT:    pcmpeqb %xmm0, %xmm1
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; X64-NEXT:    retq
   %ax = call <2 x i8> @llvm.abs.v2i8(<2 x i8> %x, i1 false)
   %cmp = icmp eq <2 x i8> %ax, <i8 0, i8 0>
@@ -322,36 +230,20 @@
 define <2 x i1> @absT_ne_0_2xi16(<2 x i16> %x) {
 ; X86-LABEL: absT_ne_0_2xi16:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    .cfi_offset %esi, -8
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movswl %ax, %esi
-; X86-NEXT:    movswl %cx, %edx
-; X86-NEXT:    sarl $15, %edx
-; X86-NEXT:    xorl %edx, %ecx
-; X86-NEXT:    sarl $15, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    cmpw %si, %ax
+; X86-NEXT:    cmpw $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    setne %al
-; X86-NEXT:    cmpw %dx, %cx
+; X86-NEXT:    cmpw $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    setne %dl
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_ne_0_2xi16:
 ; X64:       # %bb.0:
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
 ; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    pxor %xmm2, %xmm2
-; X64-NEXT:    psubw %xmm0, %xmm2
-; X64-NEXT:    pmaxsw %xmm0, %xmm2
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,0,0]
-; X64-NEXT:    pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,5,5,5,5]
-; X64-NEXT:    pcmpeqw %xmm1, %xmm2
+; X64-NEXT:    pcmpeqw %xmm0, %xmm1
 ; X64-NEXT:    pcmpeqd %xmm0, %xmm0
-; X64-NEXT:    pxor %xmm2, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ax = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %x, i1 true)
   %cmp = icmp ne <2 x i16> %ax, <i16 0, i16 0>
@@ -361,36 +253,20 @@
 define <2 x i1> @absF_ne_0_2xi16(<2 x i16> %x) {
 ; X86-LABEL: absF_ne_0_2xi16:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    .cfi_offset %esi, -8
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movswl %ax, %esi
-; X86-NEXT:    movswl %cx, %edx
-; X86-NEXT:    sarl $15, %edx
-; X86-NEXT:    xorl %edx, %ecx
-; X86-NEXT:    sarl $15, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    cmpw %si, %ax
+; X86-NEXT:    cmpw $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    setne %al
-; X86-NEXT:    cmpw %dx, %cx
+; X86-NEXT:    cmpw $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    setne %dl
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_ne_0_2xi16:
 ; X64:       # %bb.0:
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
 ; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    pxor %xmm2, %xmm2
-; X64-NEXT:    psubw %xmm0, %xmm2
-; X64-NEXT:    pmaxsw %xmm0, %xmm2
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,0,0]
-; X64-NEXT:    pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,5,5,5,5]
-; X64-NEXT:    pcmpeqw %xmm1, %xmm2
+; X64-NEXT:    pcmpeqw %xmm0, %xmm1
 ; X64-NEXT:    pcmpeqd %xmm0, %xmm0
-; X64-NEXT:    pxor %xmm2, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ax = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %x, i1 false)
   %cmp = icmp ne <2 x i16> %ax, <i16 0, i16 0>
@@ -400,24 +276,15 @@
 define <2 x i1> @absF_ne_0_2xi16_fail(<2 x i16> %x) {
 ; X86-LABEL: absF_ne_0_2xi16_fail:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    .cfi_offset %esi, -8
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movswl %cx, %edx
-; X86-NEXT:    movswl %ax, %esi
-; X86-NEXT:    sarl $15, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    subl %esi, %eax
-; X86-NEXT:    sarl $15, %edx
-; X86-NEXT:    xorl %edx, %ecx
-; X86-NEXT:    cmpw %dx, %cx
-; X86-NEXT:    setne %dl
+; X86-NEXT:    movswl %ax, %ecx
+; X86-NEXT:    sarl $15, %ecx
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    subl %ecx, %eax
 ; X86-NEXT:    cmpw $1, %ax
 ; X86-NEXT:    setne %al
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    cmpw $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    setne %dl
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_ne_0_2xi16_fail:
@@ -439,28 +306,14 @@
 define <2 x i1> @absT_eq_0_2xi32(<2 x i32> %x) {
 ; X86-LABEL: absT_eq_0_2xi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %ecx
-; X86-NEXT:    subl %edx, %ecx
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %eax
-; X86-NEXT:    subl %edx, %eax
-; X86-NEXT:    cmpl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    cmpl $-2147483648, {{[0-9]+}}(%esp) # imm = 0x80000000
 ; X86-NEXT:    sete %al
-; X86-NEXT:    cmpl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT:    cmpl $-2147483648, {{[0-9]+}}(%esp) # imm = 0x80000000
 ; X86-NEXT:    sete %dl
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_eq_0_2xi32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psrad $31, %xmm1
-; X64-NEXT:    pxor %xmm1, %xmm0
-; X64-NEXT:    psubd %xmm1, %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
 ; X64-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT:    retq
@@ -472,18 +325,9 @@
 define <2 x i1> @absT_eq_0_2xi32_fail_fixme(<2 x i32> %x) {
 ; X86-LABEL: absT_eq_0_2xi32_fail_fixme:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %ecx
-; X86-NEXT:    subl %edx, %ecx
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %eax
-; X86-NEXT:    subl %edx, %eax
+; X86-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    sete %al
-; X86-NEXT:    cmpl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT:    cmpl $-2147483648, {{[0-9]+}}(%esp) # imm = 0x80000000
 ; X86-NEXT:    sete %dl
 ; X86-NEXT:    retl
 ;
@@ -504,28 +348,14 @@
 define <2 x i1> @absF_eq_0_2xi32(<2 x i32> %x) {
 ; X86-LABEL: absF_eq_0_2xi32:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %ecx
-; X86-NEXT:    subl %edx, %ecx
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %eax
-; X86-NEXT:    subl %edx, %eax
-; X86-NEXT:    cmpl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT:    cmpl $-2147483648, {{[0-9]+}}(%esp) # imm = 0x80000000
 ; X86-NEXT:    sete %al
-; X86-NEXT:    cmpl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT:    cmpl $-2147483648, {{[0-9]+}}(%esp) # imm = 0x80000000
 ; X86-NEXT:    sete %dl
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_eq_0_2xi32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psrad $31, %xmm1
-; X64-NEXT:    pxor %xmm1, %xmm0
-; X64-NEXT:    psubd %xmm1, %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
 ; X64-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT:    retq
@@ -537,47 +367,18 @@
 define <2 x i1> @absT_ne_0_2xi64(<2 x i64> %x) {
 ; X86-LABEL: absT_ne_0_2xi64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:    .cfi_offset %esi, -12
-; X86-NEXT:    .cfi_offset %edi, -8
+; X86-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    xorl %esi, %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    xorl %esi, %edx
-; X86-NEXT:    subl %esi, %edx
-; X86-NEXT:    sbbl %esi, %ecx
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    xorl %esi, %edi
-; X86-NEXT:    subl %esi, %edi
-; X86-NEXT:    sbbl %esi, %eax
-; X86-NEXT:    addl $-2147483648, %eax # imm = 0x80000000
-; X86-NEXT:    orl %edi, %eax
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    setne %al
-; X86-NEXT:    addl $-2147483648, %ecx # imm = 0x80000000
-; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    setne %dl
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    popl %edi
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absT_ne_0_2xi64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psrad $31, %xmm1
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X64-NEXT:    pxor %xmm1, %xmm0
-; X64-NEXT:    psubq %xmm1, %xmm0
 ; X64-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; X64-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -595,47 +396,18 @@
 define <2 x i1> @absF_ne_0_2xi64(<2 x i64> %x) {
 ; X86-LABEL: absF_ne_0_2xi64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:    .cfi_offset %esi, -12
-; X86-NEXT:    .cfi_offset %edi, -8
+; X86-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    xorl %esi, %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    xorl %esi, %edx
-; X86-NEXT:    subl %esi, %edx
-; X86-NEXT:    sbbl %esi, %ecx
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    xorl %esi, %edi
-; X86-NEXT:    subl %esi, %edi
-; X86-NEXT:    sbbl %esi, %eax
-; X86-NEXT:    addl $-2147483648, %eax # imm = 0x80000000
-; X86-NEXT:    orl %edi, %eax
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    setne %al
-; X86-NEXT:    addl $-2147483648, %ecx # imm = 0x80000000
-; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    setne %dl
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    popl %edi
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: absF_ne_0_2xi64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movdqa %xmm0, %xmm1
-; X64-NEXT:    psrad $31, %xmm1
-; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X64-NEXT:    pxor %xmm1, %xmm0
-; X64-NEXT:    psubq %xmm1, %xmm0
 ; X64-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; X64-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0