diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -10197,7 +10197,10 @@ // Use a target hook because some targets may prefer to transform in the // other direction. - if (TLI.convertSelectOfConstantsToMath(VT)) { + // /!\ Avoid more complex combinations if we expect to fold into a select_cc. + if ((Cond.getOpcode() != ISD::SETCC || + !TLI.isOperationLegal(ISD::SELECT_CC, VT)) && + TLI.convertSelectOfConstantsToMath(VT)) { // For any constants that differ by 1, we can transform the select into an // extend and add. const APInt &C1Val = C1->getAPIntValue(); @@ -10224,6 +10227,12 @@ return DAG.getNode(ISD::SHL, DL, VT, Cond, ShAmtC); } + // select Cond, -1, C --> or (sext Cond), C + if (C1->isAllOnes()) { + Cond = DAG.getSExtOrTrunc(Cond, DL, VT); + return DAG.getNode(ISD::OR, DL, VT, Cond, N2); + } + if (SDValue V = foldSelectOfConstantsUsingSra(N, DAG)) return V; } diff --git a/llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll b/llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll --- a/llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll +++ b/llvm/test/CodeGen/PowerPC/ppc32-selectcc-i64.ll @@ -22,12 +22,12 @@ ; CHECK-NEXT: .LBB0_2: # %res_block ; CHECK-NEXT: cmplw 7, 8 ; CHECK-NEXT: cmplw 1, 5, 6 -; CHECK-NEXT: li 3, 1 -; CHECK-NEXT: li 4, -1 +; CHECK-NEXT: li 3, -1 ; CHECK-NEXT: crandc 20, 0, 2 ; CHECK-NEXT: crand 21, 2, 4 -; CHECK-NEXT: cror 20, 21, 20 -; CHECK-NEXT: isel 3, 4, 3, 20 +; CHECK-NEXT: crnor 20, 21, 20 +; CHECK-NEXT: isel 3, 0, 3, 20 +; CHECK-NEXT: ori 3, 3, 1 ; CHECK-NEXT: srwi 3, 3, 31 ; CHECK-NEXT: blr entry: diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll --- a/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll +++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll @@ -1203,11 +1203,12 @@ ; ; CHECK-PWR8-LABEL: setbdf1: ; CHECK-PWR8: # %bb.0: -; CHECK-PWR8-NEXT: xscmpudp cr0, f2, f1 +; CHECK-PWR8-NEXT: fcmpu cr0, f2, f1 +; CHECK-PWR8-NEXT: xscmpudp cr1, f2, f1 ; CHECK-PWR8-NEXT: li r3, 1 ; CHECK-PWR8-NEXT: li r4, -1 ; CHECK-PWR8-NEXT: iselgt r3, r4, r3 -; CHECK-PWR8-NEXT: iseleq r3, 0, r3 +; CHECK-PWR8-NEXT: isel r3, 0, r3, 4*cr1+eq ; CHECK-PWR8-NEXT: blr %t1 = fcmp nnan oeq double %b, %a %t2 = fcmp nnan ogt double %b, %a diff --git a/llvm/test/CodeGen/PowerPC/prefer-dqform.ll b/llvm/test/CodeGen/PowerPC/prefer-dqform.ll --- a/llvm/test/CodeGen/PowerPC/prefer-dqform.ll +++ b/llvm/test/CodeGen/PowerPC/prefer-dqform.ll @@ -14,14 +14,12 @@ ; CHECK-P9-LABEL: test: ; CHECK-P9: # %bb.0: # %test_entry ; CHECK-P9-NEXT: andi. r3, r6, 15 +; CHECK-P9-NEXT: li r3, 2 +; CHECK-P9-NEXT: li r10, 1 ; CHECK-P9-NEXT: lwz r4, 0(r4) ; CHECK-P9-NEXT: lwz r5, 0(r5) -; CHECK-P9-NEXT: li r11, 1 -; CHECK-P9-NEXT: addic r3, r3, -1 -; CHECK-P9-NEXT: subfe r10, r3, r3 -; CHECK-P9-NEXT: li r3, 2 -; CHECK-P9-NEXT: not r10, r10 -; CHECK-P9-NEXT: iseleq r3, r11, r3 +; CHECK-P9-NEXT: iseleq r3, r10, r3 +; CHECK-P9-NEXT: subfic r10, r3, 1 ; CHECK-P9-NEXT: add r4, r10, r4 ; CHECK-P9-NEXT: srawi r4, r4, 4 ; CHECK-P9-NEXT: addze r4, r4 @@ -68,14 +66,13 @@ ; ; CHECK-P10-LABEL: test: ; CHECK-P10: # %bb.0: # %test_entry -; CHECK-P10-NEXT: lwz r4, 0(r4) ; CHECK-P10-NEXT: andi. 
r3, r6, 15 ; CHECK-P10-NEXT: li r3, 2 ; CHECK-P10-NEXT: li r10, 1 +; CHECK-P10-NEXT: lwz r4, 0(r4) ; CHECK-P10-NEXT: lwz r5, 0(r5) ; CHECK-P10-NEXT: iseleq r3, r10, r3 -; CHECK-P10-NEXT: setnbc r10, eq -; CHECK-P10-NEXT: not r10, r10 +; CHECK-P10-NEXT: subfic r10, r3, 1 ; CHECK-P10-NEXT: add r4, r10, r4 ; CHECK-P10-NEXT: srawi r4, r4, 4 ; CHECK-P10-NEXT: addze r4, r4 diff --git a/llvm/test/CodeGen/X86/memcmp-more-load-pairs-x32.ll b/llvm/test/CodeGen/X86/memcmp-more-load-pairs-x32.ll --- a/llvm/test/CodeGen/X86/memcmp-more-load-pairs-x32.ll +++ b/llvm/test/CodeGen/X86/memcmp-more-load-pairs-x32.ll @@ -160,9 +160,10 @@ ; X86-NEXT: popl %esi ; X86-NEXT: retl ; X86-NEXT: .LBB9_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpw %si, %dx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: popl %esi ; X86-NEXT: retl %m = tail call i32 @memcmp(ptr %X, ptr %Y, i32 3) nounwind @@ -292,9 +293,10 @@ ; X86-NEXT: popl %esi ; X86-NEXT: retl ; X86-NEXT: .LBB16_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %esi, %edx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: popl %esi ; X86-NEXT: retl %m = tail call i32 @memcmp(ptr %X, ptr %Y, i32 5) nounwind @@ -337,9 +339,10 @@ ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: jmp .LBB18_2 ; X86-NEXT: .LBB18_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %esi, %edx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB18_2: # %endblock ; X86-NEXT: shrl $31, %eax ; X86-NEXT: # kill: def $al killed $al killed $eax @@ -373,8 +376,8 @@ ; X86-NEXT: .LBB19_2: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB19_3: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -422,8 +425,8 @@ ; X86-NEXT: .LBB21_2: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB21_3: # %endblock ; X86-NEXT: shrl $31, %eax ; X86-NEXT: # kill: def $al killed $al killed $eax @@ -457,8 +460,8 @@ ; X86-NEXT: .LBB22_2: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB22_3: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -619,8 +622,8 @@ ; X86-NEXT: .LBB29_3: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB29_4: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -744,8 +747,8 @@ ; X86-NEXT: .LBB33_4: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB33_5: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -858,8 +861,8 @@ ; X86-NEXT: .LBB35_4: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl 
%eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB35_5: # %endblock ; X86-NEXT: shrl $31, %eax ; X86-NEXT: # kill: def $al killed $al killed $eax @@ -907,8 +910,8 @@ ; X86-NEXT: .LBB36_4: # %res_block ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: setae %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: sbbl %edx, %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: .LBB36_5: # %endblock ; X86-NEXT: testl %edx, %edx ; X86-NEXT: setg %al diff --git a/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll b/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll --- a/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll +++ b/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll @@ -140,11 +140,11 @@ define i32 @length3(ptr %X, ptr %Y) nounwind { ; X64-LABEL: length3: ; X64: # %bb.0: -; X64-NEXT: movzwl (%rdi), %eax -; X64-NEXT: movzwl (%rsi), %ecx -; X64-NEXT: rolw $8, %ax +; X64-NEXT: movzwl (%rdi), %ecx +; X64-NEXT: movzwl (%rsi), %edx ; X64-NEXT: rolw $8, %cx -; X64-NEXT: cmpw %cx, %ax +; X64-NEXT: rolw $8, %dx +; X64-NEXT: cmpw %dx, %cx ; X64-NEXT: jne .LBB9_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 2(%rdi), %eax @@ -152,9 +152,10 @@ ; X64-NEXT: subl %ecx, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB9_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpw %dx, %cx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 3) nounwind ret i32 %m @@ -256,11 +257,11 @@ define i32 @length5(ptr %X, ptr %Y) nounwind { ; X64-LABEL: length5: ; X64: # %bb.0: -; X64-NEXT: movl (%rdi), %eax -; X64-NEXT: movl (%rsi), %ecx -; X64-NEXT: bswapl %eax +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl (%rsi), %edx ; X64-NEXT: bswapl %ecx -; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: bswapl %edx +; X64-NEXT: cmpl %edx, %ecx ; X64-NEXT: jne .LBB16_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 4(%rdi), %eax @@ -268,9 +269,10 @@ ; X64-NEXT: subl %ecx, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB16_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpl %edx, %ecx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 5) nounwind ret i32 %m @@ -295,11 +297,11 @@ define i1 @length5_lt(ptr %X, ptr %Y) nounwind { ; X64-LABEL: length5_lt: ; X64: # %bb.0: -; X64-NEXT: movl (%rdi), %eax -; X64-NEXT: movl (%rsi), %ecx -; X64-NEXT: bswapl %eax +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl (%rsi), %edx ; X64-NEXT: bswapl %ecx -; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: bswapl %edx +; X64-NEXT: cmpl %edx, %ecx ; X64-NEXT: jne .LBB18_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 4(%rdi), %eax @@ -309,9 +311,10 @@ ; X64-NEXT: # kill: def $al killed $al killed $eax ; X64-NEXT: retq ; X64-NEXT: .LBB18_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpl %edx, %ecx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax ; X64-NEXT: retq @@ -340,8 +343,8 @@ ; X64-NEXT: .LBB19_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB19_3: # %endblock ; X64-NEXT: retq %m = tail call i32 
@memcmp(ptr %X, ptr %Y, i64 7) nounwind @@ -383,8 +386,8 @@ ; X64-NEXT: .LBB21_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB21_3: # %endblock ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -516,8 +519,8 @@ ; X64-NEXT: .LBB29_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB29_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 12) nounwind @@ -591,8 +594,8 @@ ; X64-NEXT: .LBB33_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB33_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 16) nounwind @@ -661,8 +664,8 @@ ; X64-NEXT: .LBB35_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB35_3: # %endblock ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -692,8 +695,8 @@ ; X64-NEXT: .LBB36_2: # %res_block ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpq %rcx, %rax -; X64-NEXT: setae %dl -; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: sbbl %edx, %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: .LBB36_3: # %endblock ; X64-NEXT: testl %edx, %edx ; X64-NEXT: setg %al @@ -772,8 +775,8 @@ ; X64-NEXT: .LBB38_3: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB38_4: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 24) nounwind @@ -864,8 +867,8 @@ ; X64-NEXT: .LBB40_3: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB40_4: # %endblock ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -902,8 +905,8 @@ ; X64-NEXT: .LBB41_3: # %res_block ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpq %rcx, %rax -; X64-NEXT: setae %dl -; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: sbbl %edx, %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: .LBB41_4: # %endblock ; X64-NEXT: testl %edx, %edx ; X64-NEXT: setg %al @@ -999,8 +1002,8 @@ ; X64-NEXT: .LBB43_4: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB43_5: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 31) nounwind @@ -1097,8 +1100,8 @@ ; X64-NEXT: .LBB45_4: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB45_5: # %endblock ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -1142,8 +1145,8 @@ ; X64-NEXT: .LBB46_4: # %res_block ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpq %rcx, %rax -; X64-NEXT: setae %dl -; X64-NEXT: leal -1(%rdx,%rdx), %edx +; 
X64-NEXT: sbbl %edx, %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: .LBB46_5: # %endblock ; X64-NEXT: testl %edx, %edx ; X64-NEXT: setg %al @@ -1295,8 +1298,8 @@ ; X64-NEXT: .LBB49_4: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB49_5: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 32) nounwind @@ -1408,8 +1411,8 @@ ; X64-NEXT: .LBB51_4: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB51_5: # %endblock ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -1453,8 +1456,8 @@ ; X64-NEXT: .LBB52_4: # %res_block ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpq %rcx, %rax -; X64-NEXT: setae %dl -; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: sbbl %edx, %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: .LBB52_5: # %endblock ; X64-NEXT: testl %edx, %edx ; X64-NEXT: setg %al diff --git a/llvm/test/CodeGen/X86/memcmp-optsize-x32.ll b/llvm/test/CodeGen/X86/memcmp-optsize-x32.ll --- a/llvm/test/CodeGen/X86/memcmp-optsize-x32.ll +++ b/llvm/test/CodeGen/X86/memcmp-optsize-x32.ll @@ -88,9 +88,10 @@ ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: jmp .LBB4_2 ; X86-NEXT: .LBB4_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpw %si, %dx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB4_2: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -178,9 +179,10 @@ ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: jmp .LBB9_2 ; X86-NEXT: .LBB9_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %esi, %edx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB9_2: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -229,8 +231,8 @@ ; X86-NEXT: .LBB11_2: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB11_3: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl diff --git a/llvm/test/CodeGen/X86/memcmp-optsize.ll b/llvm/test/CodeGen/X86/memcmp-optsize.ll --- a/llvm/test/CodeGen/X86/memcmp-optsize.ll +++ b/llvm/test/CodeGen/X86/memcmp-optsize.ll @@ -68,11 +68,11 @@ define i32 @length3(ptr %X, ptr %Y) nounwind optsize { ; X64-LABEL: length3: ; X64: # %bb.0: -; X64-NEXT: movzwl (%rdi), %eax -; X64-NEXT: movzwl (%rsi), %ecx -; X64-NEXT: rolw $8, %ax +; X64-NEXT: movzwl (%rdi), %ecx +; X64-NEXT: movzwl (%rsi), %edx ; X64-NEXT: rolw $8, %cx -; X64-NEXT: cmpw %cx, %ax +; X64-NEXT: rolw $8, %dx +; X64-NEXT: cmpw %dx, %cx ; X64-NEXT: jne .LBB4_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 2(%rdi), %eax @@ -80,9 +80,10 @@ ; X64-NEXT: subl %ecx, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB4_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpw %dx, %cx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 3) nounwind ret i32 %m @@ -146,11 +147,11 @@ define i32 @length5(ptr %X, ptr %Y) nounwind optsize { ; X64-LABEL: length5: ; X64: # %bb.0: -; 
X64-NEXT: movl (%rdi), %eax -; X64-NEXT: movl (%rsi), %ecx -; X64-NEXT: bswapl %eax +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl (%rsi), %edx ; X64-NEXT: bswapl %ecx -; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: bswapl %edx +; X64-NEXT: cmpl %edx, %ecx ; X64-NEXT: jne .LBB9_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 4(%rdi), %eax @@ -158,9 +159,10 @@ ; X64-NEXT: subl %ecx, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB9_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpl %edx, %ecx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 5) nounwind ret i32 %m @@ -257,8 +259,8 @@ ; X64-NEXT: .LBB15_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB15_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 12) nounwind @@ -287,8 +289,8 @@ ; X64-NEXT: .LBB16_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB16_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 16) nounwind diff --git a/llvm/test/CodeGen/X86/memcmp-pgso-x32.ll b/llvm/test/CodeGen/X86/memcmp-pgso-x32.ll --- a/llvm/test/CodeGen/X86/memcmp-pgso-x32.ll +++ b/llvm/test/CodeGen/X86/memcmp-pgso-x32.ll @@ -88,9 +88,10 @@ ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: jmp .LBB4_2 ; X86-NEXT: .LBB4_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpw %si, %dx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB4_2: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -178,9 +179,10 @@ ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: jmp .LBB9_2 ; X86-NEXT: .LBB9_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %esi, %edx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB9_2: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -229,8 +231,8 @@ ; X86-NEXT: .LBB11_2: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB11_3: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl diff --git a/llvm/test/CodeGen/X86/memcmp-pgso.ll b/llvm/test/CodeGen/X86/memcmp-pgso.ll --- a/llvm/test/CodeGen/X86/memcmp-pgso.ll +++ b/llvm/test/CodeGen/X86/memcmp-pgso.ll @@ -68,11 +68,11 @@ define i32 @length3(ptr %X, ptr %Y) nounwind !prof !14 { ; X64-LABEL: length3: ; X64: # %bb.0: -; X64-NEXT: movzwl (%rdi), %eax -; X64-NEXT: movzwl (%rsi), %ecx -; X64-NEXT: rolw $8, %ax +; X64-NEXT: movzwl (%rdi), %ecx +; X64-NEXT: movzwl (%rsi), %edx ; X64-NEXT: rolw $8, %cx -; X64-NEXT: cmpw %cx, %ax +; X64-NEXT: rolw $8, %dx +; X64-NEXT: cmpw %dx, %cx ; X64-NEXT: jne .LBB4_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 2(%rdi), %eax @@ -80,9 +80,10 @@ ; X64-NEXT: subl %ecx, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB4_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpw %dx, %cx +; 
X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 3) nounwind ret i32 %m @@ -146,11 +147,11 @@ define i32 @length5(ptr %X, ptr %Y) nounwind !prof !14 { ; X64-LABEL: length5: ; X64: # %bb.0: -; X64-NEXT: movl (%rdi), %eax -; X64-NEXT: movl (%rsi), %ecx -; X64-NEXT: bswapl %eax +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl (%rsi), %edx ; X64-NEXT: bswapl %ecx -; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: bswapl %edx +; X64-NEXT: cmpl %edx, %ecx ; X64-NEXT: jne .LBB9_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 4(%rdi), %eax @@ -158,9 +159,10 @@ ; X64-NEXT: subl %ecx, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB9_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpl %edx, %ecx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 5) nounwind ret i32 %m @@ -257,8 +259,8 @@ ; X64-NEXT: .LBB15_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB15_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 12) nounwind @@ -287,8 +289,8 @@ ; X64-NEXT: .LBB16_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB16_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 16) nounwind diff --git a/llvm/test/CodeGen/X86/memcmp-x32.ll b/llvm/test/CodeGen/X86/memcmp-x32.ll --- a/llvm/test/CodeGen/X86/memcmp-x32.ll +++ b/llvm/test/CodeGen/X86/memcmp-x32.ll @@ -188,9 +188,10 @@ ; X86-NEXT: popl %esi ; X86-NEXT: retl ; X86-NEXT: .LBB11_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpw %si, %dx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: popl %esi ; X86-NEXT: retl %m = tail call i32 @memcmp(ptr %X, ptr %Y, i32 3) nounwind @@ -320,9 +321,10 @@ ; X86-NEXT: popl %esi ; X86-NEXT: retl ; X86-NEXT: .LBB18_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %esi, %edx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: popl %esi ; X86-NEXT: retl %m = tail call i32 @memcmp(ptr %X, ptr %Y, i32 5) nounwind @@ -365,9 +367,10 @@ ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: jmp .LBB20_2 ; X86-NEXT: .LBB20_3: # %res_block -; X86-NEXT: setae %al -; X86-NEXT: movzbl %al, %eax -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %esi, %edx +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB20_2: # %endblock ; X86-NEXT: shrl $31, %eax ; X86-NEXT: # kill: def $al killed $al killed $eax @@ -401,8 +404,8 @@ ; X86-NEXT: .LBB21_2: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB21_3: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl @@ -433,8 +436,8 @@ ; X86-NEXT: .LBB22_2: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, 
%eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB22_3: # %endblock ; X86-NEXT: shrl $31, %eax ; X86-NEXT: # kill: def $al killed $al killed $eax @@ -485,8 +488,8 @@ ; X86-NEXT: .LBB24_2: # %res_block ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: sbbl %eax, %eax +; X86-NEXT: orl $1, %eax ; X86-NEXT: .LBB24_3: # %endblock ; X86-NEXT: popl %esi ; X86-NEXT: retl diff --git a/llvm/test/CodeGen/X86/memcmp.ll b/llvm/test/CodeGen/X86/memcmp.ll --- a/llvm/test/CodeGen/X86/memcmp.ll +++ b/llvm/test/CodeGen/X86/memcmp.ll @@ -166,11 +166,11 @@ define i32 @length3(ptr %X, ptr %Y) nounwind { ; X64-LABEL: length3: ; X64: # %bb.0: -; X64-NEXT: movzwl (%rdi), %eax -; X64-NEXT: movzwl (%rsi), %ecx -; X64-NEXT: rolw $8, %ax +; X64-NEXT: movzwl (%rdi), %ecx +; X64-NEXT: movzwl (%rsi), %edx ; X64-NEXT: rolw $8, %cx -; X64-NEXT: cmpw %cx, %ax +; X64-NEXT: rolw $8, %dx +; X64-NEXT: cmpw %dx, %cx ; X64-NEXT: jne .LBB11_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 2(%rdi), %eax @@ -178,9 +178,10 @@ ; X64-NEXT: subl %ecx, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB11_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpw %dx, %cx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 3) nounwind ret i32 %m @@ -282,11 +283,11 @@ define i32 @length5(ptr %X, ptr %Y) nounwind { ; X64-LABEL: length5: ; X64: # %bb.0: -; X64-NEXT: movl (%rdi), %eax -; X64-NEXT: movl (%rsi), %ecx -; X64-NEXT: bswapl %eax +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl (%rsi), %edx ; X64-NEXT: bswapl %ecx -; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: bswapl %edx +; X64-NEXT: cmpl %edx, %ecx ; X64-NEXT: jne .LBB18_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 4(%rdi), %eax @@ -294,9 +295,10 @@ ; X64-NEXT: subl %ecx, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB18_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpl %edx, %ecx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 5) nounwind ret i32 %m @@ -321,11 +323,11 @@ define i1 @length5_lt(ptr %X, ptr %Y) nounwind { ; X64-LABEL: length5_lt: ; X64: # %bb.0: -; X64-NEXT: movl (%rdi), %eax -; X64-NEXT: movl (%rsi), %ecx -; X64-NEXT: bswapl %eax +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl (%rsi), %edx ; X64-NEXT: bswapl %ecx -; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: bswapl %edx +; X64-NEXT: cmpl %edx, %ecx ; X64-NEXT: jne .LBB20_3 ; X64-NEXT: # %bb.1: # %loadbb1 ; X64-NEXT: movzbl 4(%rdi), %eax @@ -335,9 +337,10 @@ ; X64-NEXT: # kill: def $al killed $al killed $eax ; X64-NEXT: retq ; X64-NEXT: .LBB20_3: # %res_block -; X64-NEXT: setae %al -; X64-NEXT: movzbl %al, %eax -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpl %edx, %ecx +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax ; X64-NEXT: retq @@ -366,8 +369,8 @@ ; X64-NEXT: .LBB21_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB21_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 7) nounwind @@ -394,8 +397,8 @@ ; X64-NEXT: .LBB22_2: # 
%res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl %edx, %ecx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB22_3: # %endblock ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -542,8 +545,8 @@ ; X64-NEXT: .LBB31_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB31_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 12) nounwind @@ -600,8 +603,8 @@ ; X64-NEXT: .LBB34_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB34_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 15) nounwind @@ -628,8 +631,8 @@ ; X64-NEXT: .LBB35_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB35_3: # %endblock ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -657,8 +660,8 @@ ; X64-NEXT: .LBB36_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rcx, %rdx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB36_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr getelementptr inbounds ([513 x i8], ptr @.str, i32 0, i32 1), i64 15) nounwind @@ -698,8 +701,8 @@ ; X64-NEXT: .LBB38_2: # %res_block ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpq %rax, %rcx -; X64-NEXT: setae %dl -; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: sbbl %edx, %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: .LBB38_3: # %endblock ; X64-NEXT: testl %edx, %edx ; X64-NEXT: setg %al @@ -731,8 +734,8 @@ ; X64-NEXT: .LBB39_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB39_3: # %endblock ; X64-NEXT: retq %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 16) nounwind @@ -801,8 +804,8 @@ ; X64-NEXT: .LBB41_2: # %res_block ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rdx, %rcx -; X64-NEXT: setae %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: sbbl %eax, %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: .LBB41_3: # %endblock ; X64-NEXT: shrl $31, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -832,8 +835,8 @@ ; X64-NEXT: .LBB42_2: # %res_block ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpq %rcx, %rax -; X64-NEXT: setae %dl -; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: sbbl %edx, %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: .LBB42_3: # %endblock ; X64-NEXT: testl %edx, %edx ; X64-NEXT: setg %al diff --git a/llvm/test/CodeGen/X86/midpoint-int.ll b/llvm/test/CodeGen/X86/midpoint-int.ll --- a/llvm/test/CodeGen/X86/midpoint-int.ll +++ b/llvm/test/CodeGen/X86/midpoint-int.ll @@ -16,11 +16,12 @@ ; X64: # %bb.0: ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl %esi, %edi -; X64-NEXT: setle %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: setg %al ; X64-NEXT: movl %edi, %ecx ; X64-NEXT: cmovgl %esi, %ecx ; X64-NEXT: cmovgl %edi, %esi +; X64-NEXT: negl %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: subl %ecx, %esi ; X64-NEXT: shrl %esi ; 
X64-NEXT: imull %esi, %eax @@ -34,8 +35,7 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: setle %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB0_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -45,6 +45,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB0_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax ; X86-NEXT: addl %ecx, %eax @@ -66,11 +68,12 @@ ; X64: # %bb.0: ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl %esi, %edi -; X64-NEXT: setbe %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: seta %al ; X64-NEXT: movl %edi, %ecx ; X64-NEXT: cmoval %esi, %ecx ; X64-NEXT: cmoval %edi, %esi +; X64-NEXT: negl %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: subl %ecx, %esi ; X64-NEXT: shrl %esi ; X64-NEXT: imull %esi, %eax @@ -84,8 +87,7 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: setbe %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: seta %dl ; X86-NEXT: ja .LBB1_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -95,6 +97,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB1_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax ; X86-NEXT: addl %ecx, %eax @@ -119,11 +123,12 @@ ; X64-NEXT: movl (%rdi), %ecx ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpl %esi, %ecx -; X64-NEXT: setle %al -; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: setg %al ; X64-NEXT: movl %ecx, %edx ; X64-NEXT: cmovgl %esi, %edx ; X64-NEXT: cmovgl %ecx, %esi +; X64-NEXT: negl %eax +; X64-NEXT: orl $1, %eax ; X64-NEXT: subl %edx, %esi ; X64-NEXT: shrl %esi ; X64-NEXT: imull %esi, %eax @@ -138,8 +143,7 @@ ; X86-NEXT: movl (%ecx), %ecx ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: setle %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB2_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -149,6 +153,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB2_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax ; X86-NEXT: addl %ecx, %eax @@ -172,11 +178,12 @@ ; X64-NEXT: movl (%rsi), %eax ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: cmpl %eax, %edi -; X64-NEXT: setle %cl -; X64-NEXT: leal -1(%rcx,%rcx), %ecx +; X64-NEXT: setg %cl ; X64-NEXT: movl %edi, %edx ; X64-NEXT: cmovgl %eax, %edx ; X64-NEXT: cmovgl %edi, %eax +; X64-NEXT: negl %ecx +; X64-NEXT: orl $1, %ecx ; X64-NEXT: subl %edx, %eax ; X64-NEXT: shrl %eax ; X64-NEXT: imull %ecx, %eax @@ -191,8 +198,7 @@ ; X86-NEXT: movl (%eax), %eax ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: setle %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB3_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -202,6 +208,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB3_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax ; X86-NEXT: addl %ecx, %eax @@ -226,11 +234,12 @@ ; X64-NEXT: movl (%rsi), %eax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpl %eax, %ecx -; X64-NEXT: setle %dl -; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: setg %dl ; X64-NEXT: movl %ecx, %esi ; X64-NEXT: cmovgl %eax, %esi ; X64-NEXT: cmovgl %ecx, %eax +; X64-NEXT: negl %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: subl %esi, %eax ; 
X64-NEXT: shrl %eax ; X64-NEXT: imull %edx, %eax @@ -246,8 +255,7 @@ ; X86-NEXT: movl (%eax), %eax ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: setle %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB4_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -257,6 +265,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB4_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax ; X86-NEXT: addl %ecx, %eax @@ -286,11 +296,12 @@ ; X64: # %bb.0: ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rsi, %rdi -; X64-NEXT: setle %al -; X64-NEXT: leaq -1(%rax,%rax), %rax +; X64-NEXT: setg %al ; X64-NEXT: movq %rdi, %rcx ; X64-NEXT: cmovgq %rsi, %rcx ; X64-NEXT: cmovgq %rdi, %rsi +; X64-NEXT: negq %rax +; X64-NEXT: orq $1, %rax ; X64-NEXT: subq %rcx, %rsi ; X64-NEXT: shrq %rsi ; X64-NEXT: imulq %rsi, %rax @@ -303,38 +314,41 @@ ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: cmpl %ecx, %eax +; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl %edi, %edx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl $-1, %ebx +; X86-NEXT: sbbl %ebp, %edx +; X86-NEXT: setl %dl +; X86-NEXT: movzbl %dl, %ebx ; X86-NEXT: jl .LBB5_1 ; X86-NEXT: # %bb.2: -; X86-NEXT: xorl %ebp, %ebp -; X86-NEXT: movl $1, %ebx -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl %ecx, %esi +; X86-NEXT: movl %ebp, %ecx +; X86-NEXT: movl %ebp, %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: jmp .LBB5_3 ; X86-NEXT: .LBB5_1: -; X86-NEXT: movl $-1, %ebp ; X86-NEXT: movl %edi, %edx ; X86-NEXT: movl %eax, %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl %ecx, %eax +; X86-NEXT: movl %ebp, %ecx +; X86-NEXT: movl %ebp, %edi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: .LBB5_3: +; X86-NEXT: negl %ebx +; X86-NEXT: movl %ebx, %ebp +; X86-NEXT: orl $1, %ebp ; X86-NEXT: subl %esi, %eax ; X86-NEXT: sbbl %edx, %edi ; X86-NEXT: shrdl $1, %edi, %eax -; X86-NEXT: imull %eax, %ebp -; X86-NEXT: mull %ebx -; X86-NEXT: addl %ebp, %edx +; X86-NEXT: imull %eax, %ebx +; X86-NEXT: mull %ebp +; X86-NEXT: addl %ebx, %edx ; X86-NEXT: shrl %edi -; X86-NEXT: imull %ebx, %edi +; X86-NEXT: imull %ebp, %edi ; X86-NEXT: addl %edi, %edx -; X86-NEXT: addl %ecx, %eax -; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X86-NEXT: addl {{[0-9]+}}(%esp), %eax +; X86-NEXT: adcl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -356,11 +370,12 @@ ; X64: # %bb.0: ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rsi, %rdi -; X64-NEXT: setbe %al -; X64-NEXT: leaq -1(%rax,%rax), %rax +; X64-NEXT: seta %al ; X64-NEXT: movq %rdi, %rcx ; X64-NEXT: cmovaq %rsi, %rcx ; X64-NEXT: cmovaq %rdi, %rsi +; X64-NEXT: negq %rax +; X64-NEXT: orq $1, %rax ; X64-NEXT: subq %rcx, %rsi ; X64-NEXT: shrq %rsi ; X64-NEXT: imulq %rsi, %rax @@ -373,38 +388,42 @@ ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: cmpl %ecx, %eax +; X86-NEXT: xorl %ebx, %ebx +; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl %edi, %edx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl $-1, %ebx -; X86-NEXT: jb .LBB6_1 +; X86-NEXT: sbbl %ebp, 
%edx +; X86-NEXT: setb %dl +; X86-NEXT: sbbl %ebx, %ebx +; X86-NEXT: testb %dl, %dl +; X86-NEXT: jne .LBB6_1 ; X86-NEXT: # %bb.2: -; X86-NEXT: xorl %ebp, %ebp -; X86-NEXT: movl $1, %ebx -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl %ecx, %esi +; X86-NEXT: movl %ebp, %ecx +; X86-NEXT: movl %ebp, %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: jmp .LBB6_3 ; X86-NEXT: .LBB6_1: -; X86-NEXT: movl $-1, %ebp ; X86-NEXT: movl %edi, %edx ; X86-NEXT: movl %eax, %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl %ecx, %eax +; X86-NEXT: movl %ebp, %ecx +; X86-NEXT: movl %ebp, %edi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: .LBB6_3: +; X86-NEXT: movl %ebx, %ebp +; X86-NEXT: orl $1, %ebp ; X86-NEXT: subl %esi, %eax ; X86-NEXT: sbbl %edx, %edi ; X86-NEXT: shrdl $1, %edi, %eax -; X86-NEXT: imull %eax, %ebp -; X86-NEXT: mull %ebx -; X86-NEXT: addl %ebp, %edx +; X86-NEXT: imull %eax, %ebx +; X86-NEXT: mull %ebp +; X86-NEXT: addl %ebx, %edx ; X86-NEXT: shrl %edi -; X86-NEXT: imull %ebx, %edi +; X86-NEXT: imull %ebp, %edi ; X86-NEXT: addl %edi, %edx -; X86-NEXT: addl %ecx, %eax -; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X86-NEXT: addl {{[0-9]+}}(%esp), %eax +; X86-NEXT: adcl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -429,11 +448,12 @@ ; X64-NEXT: movq (%rdi), %rcx ; X64-NEXT: xorl %eax, %eax ; X64-NEXT: cmpq %rsi, %rcx -; X64-NEXT: setle %al -; X64-NEXT: leaq -1(%rax,%rax), %rax +; X64-NEXT: setg %al ; X64-NEXT: movq %rcx, %rdx ; X64-NEXT: cmovgq %rsi, %rdx ; X64-NEXT: cmovgq %rcx, %rsi +; X64-NEXT: negq %rax +; X64-NEXT: orq $1, %rax ; X64-NEXT: subq %rdx, %rsi ; X64-NEXT: shrq %rsi ; X64-NEXT: imulq %rsi, %rax @@ -455,29 +475,30 @@ ; X86-NEXT: cmpl %esi, %eax ; X86-NEXT: movl %edi, %edx ; X86-NEXT: sbbl %ecx, %edx -; X86-NEXT: movl $-1, %ebx +; X86-NEXT: setl %dl +; X86-NEXT: movzbl %dl, %ebx ; X86-NEXT: jl .LBB7_1 ; X86-NEXT: # %bb.2: -; X86-NEXT: xorl %ebp, %ebp -; X86-NEXT: movl $1, %ebx ; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill ; X86-NEXT: movl %esi, %edx ; X86-NEXT: jmp .LBB7_3 ; X86-NEXT: .LBB7_1: -; X86-NEXT: movl $-1, %ebp ; X86-NEXT: movl %edi, (%esp) # 4-byte Spill ; X86-NEXT: movl %eax, %edx ; X86-NEXT: movl %ecx, %edi ; X86-NEXT: movl %esi, %eax ; X86-NEXT: .LBB7_3: +; X86-NEXT: negl %ebx +; X86-NEXT: movl %ebx, %ebp +; X86-NEXT: orl $1, %ebp ; X86-NEXT: subl %edx, %eax ; X86-NEXT: sbbl (%esp), %edi # 4-byte Folded Reload ; X86-NEXT: shrdl $1, %edi, %eax -; X86-NEXT: imull %eax, %ebp -; X86-NEXT: mull %ebx -; X86-NEXT: addl %ebp, %edx +; X86-NEXT: imull %eax, %ebx +; X86-NEXT: mull %ebp +; X86-NEXT: addl %ebx, %edx ; X86-NEXT: shrl %edi -; X86-NEXT: imull %ebx, %edi +; X86-NEXT: imull %ebp, %edi ; X86-NEXT: addl %edi, %edx ; X86-NEXT: addl %esi, %eax ; X86-NEXT: adcl %ecx, %edx @@ -505,11 +526,12 @@ ; X64-NEXT: movq (%rsi), %rax ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: cmpq %rax, %rdi -; X64-NEXT: setle %cl -; X64-NEXT: leaq -1(%rcx,%rcx), %rcx +; X64-NEXT: setg %cl ; X64-NEXT: movq %rdi, %rdx ; X64-NEXT: cmovgq %rax, %rdx ; X64-NEXT: cmovgq %rdi, %rax +; X64-NEXT: negq %rcx +; X64-NEXT: orq $1, %rcx ; X64-NEXT: subq %rdx, %rax ; X64-NEXT: shrq %rax ; X64-NEXT: imulq %rcx, %rax @@ -522,39 +544,42 @@ ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl (%edx), %eax ; X86-NEXT: movl 4(%edx), %edi -; X86-NEXT: cmpl %ecx, %eax +; X86-NEXT: cmpl {{[0-9]+}}(%esp), 
%eax ; X86-NEXT: movl %edi, %edx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl $-1, %ebx +; X86-NEXT: sbbl %ebp, %edx +; X86-NEXT: setl %dl +; X86-NEXT: movzbl %dl, %ebx ; X86-NEXT: jl .LBB8_1 ; X86-NEXT: # %bb.2: -; X86-NEXT: xorl %ebp, %ebp -; X86-NEXT: movl $1, %ebx -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl %ecx, %esi +; X86-NEXT: movl %ebp, %ecx +; X86-NEXT: movl %ebp, %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: jmp .LBB8_3 ; X86-NEXT: .LBB8_1: -; X86-NEXT: movl $-1, %ebp ; X86-NEXT: movl %edi, %edx ; X86-NEXT: movl %eax, %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl %ecx, %eax +; X86-NEXT: movl %ebp, %ecx +; X86-NEXT: movl %ebp, %edi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: .LBB8_3: +; X86-NEXT: negl %ebx +; X86-NEXT: movl %ebx, %ebp +; X86-NEXT: orl $1, %ebp ; X86-NEXT: subl %esi, %eax ; X86-NEXT: sbbl %edx, %edi ; X86-NEXT: shrdl $1, %edi, %eax -; X86-NEXT: imull %eax, %ebp -; X86-NEXT: mull %ebx -; X86-NEXT: addl %ebp, %edx +; X86-NEXT: imull %eax, %ebx +; X86-NEXT: mull %ebp +; X86-NEXT: addl %ebx, %edx ; X86-NEXT: shrl %edi -; X86-NEXT: imull %ebx, %edi +; X86-NEXT: imull %ebp, %edi ; X86-NEXT: addl %edi, %edx -; X86-NEXT: addl %ecx, %eax -; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X86-NEXT: addl {{[0-9]+}}(%esp), %eax +; X86-NEXT: adcl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -579,11 +604,12 @@ ; X64-NEXT: movq (%rsi), %rax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpq %rax, %rcx -; X64-NEXT: setle %dl -; X64-NEXT: leaq -1(%rdx,%rdx), %rdx +; X64-NEXT: setg %dl ; X64-NEXT: movq %rcx, %rsi ; X64-NEXT: cmovgq %rax, %rsi ; X64-NEXT: cmovgq %rcx, %rax +; X64-NEXT: negq %rdx +; X64-NEXT: orq $1, %rdx ; X64-NEXT: subq %rsi, %rax ; X64-NEXT: shrq %rax ; X64-NEXT: imulq %rdx, %rax @@ -606,29 +632,30 @@ ; X86-NEXT: cmpl %esi, %eax ; X86-NEXT: movl %edi, %edx ; X86-NEXT: sbbl %ecx, %edx -; X86-NEXT: movl $-1, %ebx +; X86-NEXT: setl %dl +; X86-NEXT: movzbl %dl, %ebx ; X86-NEXT: jl .LBB9_1 ; X86-NEXT: # %bb.2: -; X86-NEXT: xorl %ebp, %ebp -; X86-NEXT: movl $1, %ebx ; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill ; X86-NEXT: movl %esi, %edx ; X86-NEXT: jmp .LBB9_3 ; X86-NEXT: .LBB9_1: -; X86-NEXT: movl $-1, %ebp ; X86-NEXT: movl %edi, (%esp) # 4-byte Spill ; X86-NEXT: movl %eax, %edx ; X86-NEXT: movl %ecx, %edi ; X86-NEXT: movl %esi, %eax ; X86-NEXT: .LBB9_3: +; X86-NEXT: negl %ebx +; X86-NEXT: movl %ebx, %ebp +; X86-NEXT: orl $1, %ebp ; X86-NEXT: subl %edx, %eax ; X86-NEXT: sbbl (%esp), %edi # 4-byte Folded Reload ; X86-NEXT: shrdl $1, %edi, %eax -; X86-NEXT: imull %eax, %ebp -; X86-NEXT: mull %ebx -; X86-NEXT: addl %ebp, %edx +; X86-NEXT: imull %eax, %ebx +; X86-NEXT: mull %ebp +; X86-NEXT: addl %ebx, %edx ; X86-NEXT: shrl %edi -; X86-NEXT: imull %ebx, %edi +; X86-NEXT: imull %ebp, %edi ; X86-NEXT: addl %edi, %edx ; X86-NEXT: addl %esi, %eax ; X86-NEXT: adcl %ecx, %edx @@ -660,13 +687,14 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind { ; X64-LABEL: scalar_i16_signed_reg_reg: ; X64: # %bb.0: -; X64-NEXT: xorl %eax, %eax +; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: cmpw %si, %di -; X64-NEXT: setle %al -; X64-NEXT: leal -1(%rax,%rax), %ecx +; X64-NEXT: setg %cl ; X64-NEXT: movl %edi, %eax ; X64-NEXT: cmovgl %esi, %eax ; X64-NEXT: cmovgl %edi, %esi +; X64-NEXT: negl %ecx +; X64-NEXT: orl $1, %ecx ; X64-NEXT: subl %eax, %esi ; X64-NEXT: movzwl %si, %eax ; X64-NEXT: shrl %eax @@ -682,8 +710,7 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: xorl %edx, %edx ; 
X86-NEXT: cmpw %ax, %cx -; X86-NEXT: setle %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB10_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -693,6 +720,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB10_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -714,13 +743,14 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind { ; X64-LABEL: scalar_i16_unsigned_reg_reg: ; X64: # %bb.0: -; X64-NEXT: xorl %eax, %eax +; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: cmpw %si, %di -; X64-NEXT: setbe %al -; X64-NEXT: leal -1(%rax,%rax), %ecx +; X64-NEXT: seta %cl ; X64-NEXT: movl %edi, %eax ; X64-NEXT: cmoval %esi, %eax ; X64-NEXT: cmoval %edi, %esi +; X64-NEXT: negl %ecx +; X64-NEXT: orl $1, %ecx ; X64-NEXT: subl %eax, %esi ; X64-NEXT: movzwl %si, %eax ; X64-NEXT: shrl %eax @@ -736,8 +766,7 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpw %ax, %cx -; X86-NEXT: setbe %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: seta %dl ; X86-NEXT: ja .LBB11_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -747,6 +776,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB11_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -771,13 +802,14 @@ ; X64-LABEL: scalar_i16_signed_mem_reg: ; X64: # %bb.0: ; X64-NEXT: movzwl (%rdi), %ecx -; X64-NEXT: xorl %eax, %eax +; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpw %si, %cx -; X64-NEXT: setle %al -; X64-NEXT: leal -1(%rax,%rax), %edx +; X64-NEXT: setg %dl ; X64-NEXT: movl %ecx, %eax ; X64-NEXT: cmovgl %esi, %eax ; X64-NEXT: cmovgl %ecx, %esi +; X64-NEXT: negl %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: subl %eax, %esi ; X64-NEXT: movzwl %si, %eax ; X64-NEXT: shrl %eax @@ -794,8 +826,7 @@ ; X86-NEXT: movzwl (%ecx), %ecx ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpw %ax, %cx -; X86-NEXT: setle %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB12_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -805,6 +836,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB12_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -830,11 +863,12 @@ ; X64-NEXT: movzwl (%rsi), %eax ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: cmpw %ax, %di -; X64-NEXT: setle %cl -; X64-NEXT: leal -1(%rcx,%rcx), %ecx +; X64-NEXT: setg %cl ; X64-NEXT: movl %edi, %edx ; X64-NEXT: cmovgl %eax, %edx ; X64-NEXT: cmovgl %edi, %eax +; X64-NEXT: negl %ecx +; X64-NEXT: orl $1, %ecx ; X64-NEXT: subl %edx, %eax ; X64-NEXT: movzwl %ax, %eax ; X64-NEXT: shrl %eax @@ -851,8 +885,7 @@ ; X86-NEXT: movzwl (%eax), %eax ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpw %ax, %cx -; X86-NEXT: setle %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB13_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -862,6 +895,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB13_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -888,11 +923,12 @@ ; X64-NEXT: movzwl (%rsi), %eax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: cmpw %ax, %cx -; X64-NEXT: setle %dl -; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: setg %dl ; X64-NEXT: movl %ecx, %esi ; X64-NEXT: cmovgl 
%eax, %esi ; X64-NEXT: cmovgl %ecx, %eax +; X64-NEXT: negl %edx +; X64-NEXT: orl $1, %edx ; X64-NEXT: subl %esi, %eax ; X64-NEXT: movzwl %ax, %eax ; X64-NEXT: shrl %eax @@ -910,8 +946,7 @@ ; X86-NEXT: movzwl (%eax), %eax ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpw %ax, %cx -; X86-NEXT: setle %dl -; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB14_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movl %ecx, %esi @@ -921,6 +956,8 @@ ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB14_3: ; X86-NEXT: subl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: orl $1, %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -952,12 +989,12 @@ ; X64: # %bb.0: ; X64-NEXT: movl %esi, %eax ; X64-NEXT: cmpb %al, %dil -; X64-NEXT: setle %cl +; X64-NEXT: setg %cl ; X64-NEXT: movl %edi, %edx ; X64-NEXT: cmovgl %esi, %edx ; X64-NEXT: cmovgl %edi, %eax -; X64-NEXT: addb %cl, %cl -; X64-NEXT: decb %cl +; X64-NEXT: negb %cl +; X64-NEXT: orb $1, %cl ; X64-NEXT: subb %dl, %al ; X64-NEXT: shrb %al ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -970,7 +1007,7 @@ ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: cmpb %al, %cl -; X86-NEXT: setle %dl +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB15_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movb %cl, %ah @@ -980,8 +1017,8 @@ ; X86-NEXT: movb %cl, %al ; X86-NEXT: .LBB15_3: ; X86-NEXT: subb %ah, %al -; X86-NEXT: addb %dl, %dl -; X86-NEXT: decb %dl +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al @@ -1002,12 +1039,12 @@ ; X64: # %bb.0: ; X64-NEXT: movl %esi, %eax ; X64-NEXT: cmpb %al, %dil -; X64-NEXT: setbe %cl +; X64-NEXT: seta %cl ; X64-NEXT: movl %edi, %edx ; X64-NEXT: cmoval %esi, %edx ; X64-NEXT: cmoval %edi, %eax -; X64-NEXT: addb %cl, %cl -; X64-NEXT: decb %cl +; X64-NEXT: negb %cl +; X64-NEXT: orb $1, %cl ; X64-NEXT: subb %dl, %al ; X64-NEXT: shrb %al ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -1020,7 +1057,7 @@ ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: cmpb %al, %cl -; X86-NEXT: setbe %dl +; X86-NEXT: seta %dl ; X86-NEXT: ja .LBB16_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movb %cl, %ah @@ -1030,8 +1067,8 @@ ; X86-NEXT: movb %cl, %al ; X86-NEXT: .LBB16_3: ; X86-NEXT: subb %ah, %al -; X86-NEXT: addb %dl, %dl -; X86-NEXT: decb %dl +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al @@ -1054,13 +1091,13 @@ ; X64: # %bb.0: ; X64-NEXT: movzbl (%rdi), %ecx ; X64-NEXT: cmpb %sil, %cl -; X64-NEXT: setle %dl +; X64-NEXT: setg %dl ; X64-NEXT: movl %ecx, %edi ; X64-NEXT: cmovgl %esi, %edi ; X64-NEXT: movl %ecx, %eax ; X64-NEXT: cmovlel %esi, %eax -; X64-NEXT: addb %dl, %dl -; X64-NEXT: decb %dl +; X64-NEXT: negb %dl +; X64-NEXT: orb $1, %dl ; X64-NEXT: subb %dil, %al ; X64-NEXT: shrb %al ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -1074,7 +1111,7 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movzbl (%ecx), %ecx ; X86-NEXT: cmpb %al, %cl -; X86-NEXT: setle %dl +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB17_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movb %cl, %ah @@ -1084,8 +1121,8 @@ ; X86-NEXT: movb %cl, %al ; X86-NEXT: .LBB17_3: ; X86-NEXT: subb %ah, %al -; X86-NEXT: addb %dl, %dl -; X86-NEXT: decb %dl +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al @@ -1107,12 +1144,12 @@ ; X64: # %bb.0: ; X64-NEXT: movzbl (%rsi), %eax ; X64-NEXT: cmpb 
%al, %dil -; X64-NEXT: setle %cl +; X64-NEXT: setg %cl ; X64-NEXT: movl %edi, %edx ; X64-NEXT: cmovgl %eax, %edx ; X64-NEXT: cmovgl %edi, %eax -; X64-NEXT: addb %cl, %cl -; X64-NEXT: decb %cl +; X64-NEXT: negb %cl +; X64-NEXT: orb $1, %cl ; X64-NEXT: subb %dl, %al ; X64-NEXT: shrb %al ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -1126,7 +1163,7 @@ ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movzbl (%eax), %eax ; X86-NEXT: cmpb %al, %cl -; X86-NEXT: setle %dl +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB18_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movb %cl, %ah @@ -1136,8 +1173,8 @@ ; X86-NEXT: movb %cl, %al ; X86-NEXT: .LBB18_3: ; X86-NEXT: subb %ah, %al -; X86-NEXT: addb %dl, %dl -; X86-NEXT: decb %dl +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al @@ -1160,12 +1197,12 @@ ; X64-NEXT: movzbl (%rdi), %ecx ; X64-NEXT: movzbl (%rsi), %eax ; X64-NEXT: cmpb %al, %cl -; X64-NEXT: setle %dl +; X64-NEXT: setg %dl ; X64-NEXT: movl %ecx, %esi ; X64-NEXT: cmovgl %eax, %esi ; X64-NEXT: cmovgl %ecx, %eax -; X64-NEXT: addb %dl, %dl -; X64-NEXT: decb %dl +; X64-NEXT: negb %dl +; X64-NEXT: orb $1, %dl ; X64-NEXT: subb %sil, %al ; X64-NEXT: shrb %al ; X64-NEXT: # kill: def $al killed $al killed $eax @@ -1180,7 +1217,7 @@ ; X86-NEXT: movzbl (%ecx), %ecx ; X86-NEXT: movzbl (%eax), %eax ; X86-NEXT: cmpb %al, %cl -; X86-NEXT: setle %dl +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB19_1 ; X86-NEXT: # %bb.2: ; X86-NEXT: movb %cl, %ah @@ -1190,8 +1227,8 @@ ; X86-NEXT: movb %cl, %al ; X86-NEXT: .LBB19_3: ; X86-NEXT: subb %ah, %al -; X86-NEXT: addb %dl, %dl -; X86-NEXT: decb %dl +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll --- a/llvm/test/CodeGen/X86/select.ll +++ b/llvm/test/CodeGen/X86/select.ll @@ -759,22 +759,21 @@ ; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax ; ATHLON-NEXT: xorl %edx, %edx ; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax -; ATHLON-NEXT: movl $-1, %ecx -; ATHLON-NEXT: movl $1, %eax -; ATHLON-NEXT: cmovel %ecx, %eax -; ATHLON-NEXT: cmovel %ecx, %edx +; ATHLON-NEXT: sete %dl +; ATHLON-NEXT: negl %edx +; ATHLON-NEXT: movl %edx, %eax +; ATHLON-NEXT: orl $1, %eax ; ATHLON-NEXT: retl ; ; MCU-LABEL: test10: ; MCU: # %bb.0: -; MCU-NEXT: orl %edx, %eax -; MCU-NEXT: movl $-1, %eax -; MCU-NEXT: movl $-1, %edx -; MCU-NEXT: je .LBB11_2 -; MCU-NEXT: # %bb.1: +; MCU-NEXT: movl %edx, %ecx ; MCU-NEXT: xorl %edx, %edx -; MCU-NEXT: movl $1, %eax -; MCU-NEXT: .LBB11_2: +; MCU-NEXT: orl %ecx, %eax +; MCU-NEXT: sete %dl +; MCU-NEXT: negl %edx +; MCU-NEXT: movl %edx, %eax +; MCU-NEXT: orl $1, %eax ; MCU-NEXT: retl %cmp = icmp eq i64 %x, 0 %cond = select i1 %cmp, i64 -1, i64 1 @@ -932,22 +931,21 @@ ; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax ; ATHLON-NEXT: xorl %edx, %edx ; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax -; ATHLON-NEXT: movl $-1, %ecx -; ATHLON-NEXT: movl $42, %eax -; ATHLON-NEXT: cmovel %ecx, %eax -; ATHLON-NEXT: cmovel %ecx, %edx +; ATHLON-NEXT: sete %dl +; ATHLON-NEXT: negl %edx +; ATHLON-NEXT: movl %edx, %eax +; ATHLON-NEXT: orl $42, %eax ; ATHLON-NEXT: retl ; ; MCU-LABEL: eqzero_all_ones_or_const: ; MCU: # %bb.0: -; MCU-NEXT: orl %edx, %eax -; MCU-NEXT: movl $-1, %eax -; MCU-NEXT: movl $-1, %edx -; MCU-NEXT: je .LBB16_2 -; MCU-NEXT: # %bb.1: +; MCU-NEXT: movl %edx, %ecx ; MCU-NEXT: xorl %edx, %edx -; MCU-NEXT: movl $42, %eax -; MCU-NEXT: .LBB16_2: +; MCU-NEXT: orl %ecx, %eax +; MCU-NEXT: sete %dl +; 
MCU-NEXT: negl %edx +; MCU-NEXT: movl %edx, %eax +; MCU-NEXT: orl $42, %eax ; MCU-NEXT: retl %z = icmp eq i64 %x, 0 %r = select i1 %z, i64 -1, i64 42 diff --git a/llvm/test/CodeGen/X86/select_const.ll b/llvm/test/CodeGen/X86/select_const.ll --- a/llvm/test/CodeGen/X86/select_const.ll +++ b/llvm/test/CodeGen/X86/select_const.ll @@ -206,9 +206,9 @@ define i32 @select_lea_2(i1 zeroext %cond) { ; CHECK-LABEL: select_lea_2: ; CHECK: # %bb.0: -; CHECK-NEXT: xorb $1, %dil -; CHECK-NEXT: movzbl %dil, %eax -; CHECK-NEXT: leal -1(%rax,%rax), %eax +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: negl %eax +; CHECK-NEXT: orl $1, %eax ; CHECK-NEXT: retq %sel = select i1 %cond, i32 -1, i32 1 ret i32 %sel @@ -284,8 +284,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: cmpl $43, %edi -; CHECK-NEXT: setl %al -; CHECK-NEXT: leal -1(,%rax,4), %eax +; CHECK-NEXT: setge %al +; CHECK-NEXT: negl %eax +; CHECK-NEXT: orl $3, %eax ; CHECK-NEXT: # kill: def $ax killed $ax killed $eax ; CHECK-NEXT: retq %cmp = icmp sgt i32 %x, 42 diff --git a/llvm/test/CodeGen/X86/statepoint-vreg.ll b/llvm/test/CodeGen/X86/statepoint-vreg.ll --- a/llvm/test/CodeGen/X86/statepoint-vreg.ll +++ b/llvm/test/CodeGen/X86/statepoint-vreg.ll @@ -435,7 +435,7 @@ ; CHECK-NEXT: .Ltmp14: ; CHECK-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload ; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero -; CHECK-NEXT: movss %xmm0, {{[-0-9]*}}(%rsp) +; CHECK-NEXT: movss %xmm0, (%rsp) ; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload ; CHECK-NEXT: # xmm0 = mem[0],zero ; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
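
Note on the DAGCombiner change: the new fold rewrites "select Cond, -1, C" into "or (sext Cond), C". Sign-extending the i1 condition yields 0 or all-ones, so the or produces C or -1 without materializing two constants or branching, and the new SETCC/SELECT_CC check skips this (and the other select-of-constants math) where folding into a select_cc is expected instead. A minimal sketch of the effect, using the select_lea_2 test from llvm/test/CodeGen/X86/select_const.ll; the "after" form is written out as IR for illustration only, since the fold actually operates on SelectionDAG nodes:

  define i32 @select_lea_2(i1 zeroext %cond) {
    %sel = select i1 %cond, i32 -1, i32 1   ; true value is all-ones
    ret i32 %sel
  }

  ; After the fold, the DAG computes the equivalent of:
  ;   %sext = sext i1 %cond to i32   ; 0 when %cond is false, -1 when true
  ;   %r    = or i32 %sext, 1        ; 1 when false, -1 when true

The same shape is behind the x86 CHECK updates above: "negl %eax" (on a zero-extended 0/1 condition) or "sbbl %eax, %eax" (from a carry flag) materializes the 0/-1 sign extension, and "orl $1, %eax" supplies the C operand, replacing the old "setae/setle + leal -1(%eax,%eax)" sequences.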