Index: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -2138,6 +2138,17 @@ if (isNullConstant(CarryIn)) return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N0, N1); + // fold (addcarry 0, 0, X) -> (and (ext/trunc X), 1) and no carry. + if (isNullConstant(N0) && isNullConstant(N1)) { + EVT VT = N0.getValueType(); + EVT CarryVT = CarryIn.getValueType(); + SDValue CarryExt = DAG.getBoolExtOrTrunc(CarryIn, DL, VT, CarryVT); + AddToWorklist(CarryExt.getNode()); + return CombineTo(N, DAG.getNode(ISD::AND, DL, VT, CarryExt, + DAG.getConstant(1, DL, VT)), + DAG.getConstant(0, DL, CarryVT)); + } + if (SDValue Combined = visitADDCARRYLike(N0, N1, CarryIn, N)) return Combined; Index: llvm/trunk/test/CodeGen/X86/addcarry.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/addcarry.ll +++ llvm/trunk/test/CodeGen/X86/addcarry.ll @@ -86,12 +86,12 @@ define %scalar @pr31719(%scalar* nocapture readonly %this, %scalar %arg.b) { ; CHECK-LABEL: pr31719: ; CHECK: # BB#0: # %entry +; CHECK-NEXT: xorl %r10d, %r10d ; CHECK-NEXT: addq 8(%rsi), %rcx -; CHECK-NEXT: sbbq %r10, %r10 -; CHECK-NEXT: andl $1, %r10d +; CHECK-NEXT: setb %r10b +; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: addq 16(%rsi), %r8 -; CHECK-NEXT: sbbq %rax, %rax -; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: setb %al ; CHECK-NEXT: addq 24(%rsi), %r9 ; CHECK-NEXT: addq (%rsi), %rdx ; CHECK-NEXT: adcq $0, %rcx @@ -190,9 +190,9 @@ define i64 @shiftadd(i64 %a, i64 %b, i64 %c, i64 %d) { ; CHECK-LABEL: shiftadd: ; CHECK: # BB#0: # %entry +; CHECK-NEXT: leaq (%rdx,%rcx), %rax ; CHECK-NEXT: addq %rsi, %rdi -; CHECK-NEXT: adcq %rcx, %rdx -; CHECK-NEXT: movq %rdx, %rax +; CHECK-NEXT: adcq $0, %rax ; CHECK-NEXT: retq entry: %0 = zext i64 %a to i128 @@ -213,12 +213,12 @@ ; CHECK-NEXT: addq (%rsi), %rdx ; CHECK-NEXT: movq 8(%rsi), %r10 ; CHECK-NEXT: adcq $0, %r10 -; CHECK-NEXT: sbbq %rax, %rax -; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: setb %al +; CHECK-NEXT: movzbl %al, %eax ; CHECK-NEXT: addq %rcx, %r10 ; CHECK-NEXT: adcq 16(%rsi), %rax -; CHECK-NEXT: sbbq %rcx, %rcx -; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: setb %cl +; CHECK-NEXT: movzbl %cl, %ecx ; CHECK-NEXT: addq %r8, %rax ; CHECK-NEXT: adcq 24(%rsi), %rcx ; CHECK-NEXT: addq %r9, %rcx Index: llvm/trunk/test/CodeGen/X86/mul-i1024.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mul-i1024.ll +++ llvm/trunk/test/CodeGen/X86/mul-i1024.ll @@ -11,7 +11,7 @@ ; X32-NEXT: pushl %edi ; X32-NEXT: pushl %esi ; X32-NEXT: andl $-8, %esp -; X32-NEXT: subl $2640, %esp # imm = 0xA50 +; X32-NEXT: subl $2632, %esp # imm = 0xA48 ; X32-NEXT: movl 8(%ebp), %eax ; X32-NEXT: movl 64(%eax), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill @@ -58,7 +58,7 @@ ; X32-NEXT: movl 20(%eax), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 24(%eax), %ecx -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 28(%eax), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 32(%eax), %ecx @@ -1992,7 +1992,7 @@ ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, (%esp) # 4-byte Spill +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; 
X32-NEXT: adcl $0, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill @@ -2002,23 +2002,19 @@ ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %ecx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %edi, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: adcl $0, %edi -; X32-NEXT: addl %ebx, %eax -; X32-NEXT: adcl %esi, %edi -; X32-NEXT: movl $0, %ebx -; X32-NEXT: adcl $0, %ebx -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %ebx, %edi +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movzbl %al, %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 @@ -2035,8 +2031,14 @@ ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %ecx, %edx -; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %eax, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -2045,157 +2047,144 @@ ; X32-NEXT: adcl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %eax, %ecx -; X32-NEXT: movl %ecx, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %ebx +; X32-NEXT: addl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ebx, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte 
Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %esi, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, (%esp) # 4-byte Spill +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %ecx, %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %edx, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edx, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl $0, %esi +; X32-NEXT: adcl $0, %eax ; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edi, %esi -; X32-NEXT: adcl %ebx, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: addl %ebx, %eax +; X32-NEXT: adcl %edi, %ecx +; X32-NEXT: setb %dl +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movzbl %dl, %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %edi, %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %esi, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl %esi, %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: addl %esi, 
%edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, %esi +; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl %edx, %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %eax, %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: adcl $0, %edi -; X32-NEXT: movl 
{{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edi, %eax -; X32-NEXT: adcl %edx, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %esi, %esi -; X32-NEXT: andl $1, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %eax, %esi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: addl %eax, %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %ebx -; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -2215,16 +2204,15 @@ ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: addl %edx, %esi ; X32-NEXT: adcl %edi, %ecx -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %edx, %edx -; X32-NEXT: andl $1, %edx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: 
adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload @@ -2246,7 +2234,7 @@ ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx @@ -2268,16 +2256,15 @@ ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload @@ -2306,112 +2293,97 @@ ; X32-NEXT: adcl $0, %ebx ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, (%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %edx +; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl %eax, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl 
{{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %eax, %edx -; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: addl %eax, %esi +; X32-NEXT: adcl %edi, %edx +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movzbl %al, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: addl %edx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edi, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ebx, %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %edi, %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl (%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 
4-byte Folded Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %esi, %eax +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %edx, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill @@ -2429,16 +2401,14 @@ ; X32-NEXT: adcl $0, %edx ; X32-NEXT: addl %ebx, %esi ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl $0, %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -2447,16 +2417,16 @@ ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ebx, %edi -; X32-NEXT: movl %edi, %ebx +; 
X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ebx, %eax +; X32-NEXT: movl %eax, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %ecx, %eax -; X32-NEXT: movl %eax, %edi +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -2467,10 +2437,10 @@ ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %edx, %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %edi, %edx -; X32-NEXT: adcl (%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: adcl %edi, %eax ; X32-NEXT: movl %eax, %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload @@ -2485,52 +2455,50 @@ ; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %edx +; X32-NEXT: movl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %eax, %edx -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl $0, %edi -; X32-NEXT: adcl $0, %edi -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edi +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: movl %esi, (%esp) # 4-byte Spill -; 
X32-NEXT: movl (%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, (%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: addl %edx, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl %edi, %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: adcl %esi, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, (%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -2550,16 +2518,15 @@ ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload @@ -2582,7 +2549,7 @@ ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx @@ -2603,16 +2570,15 @@ ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload @@ -2639,105 +2605,88 @@ ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: adcl $0, %edi ; X32-NEXT: adcl $0, %ebx -; X32-NEXT: movl %edx, %eax -; X32-NEXT: adcl 
$0, %eax +; X32-NEXT: adcl $0, %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %eax, %edx -; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: addl %edx, %esi +; X32-NEXT: adcl %ecx, %edi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movzbl %al, %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 
4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: addl %edx, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edi, %eax +; X32-NEXT: movl %eax, %edi +; X32-NEXT: adcl %ebx, %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %ebx, %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl %eax, %esi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl %edi, %eax +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill @@ -2766,76 +2715,70 @@ ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; 
X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill -; X32-NEXT: addl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edi, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl %edx, %eax +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %eax, %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %eax, %edx +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %edi +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %ebx, %edi +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: setb %dl +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movzbl %dl, %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl $0, %eax -; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %ebx, %eax -; X32-NEXT: movl %eax, %edx -; X32-NEXT: adcl %edi, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl (%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %ecx, %ebx +; X32-NEXT: movl %edx, %ebx ; X32-NEXT: addl %eax, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill -; X32-NEXT: movl %ecx, %edi -; X32-NEXT: adcl %eax, %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl 
%esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %edx, %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %eax, %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl %ecx, %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload @@ -2847,55 +2790,53 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edx, %eax -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %esi, %esi -; X32-NEXT: andl $1, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, 
{{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: addl %eax, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %ebx +; X32-NEXT: adcl %esi, %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -2915,20 +2856,19 @@ ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload @@ -2968,16 +2908,15 @@ ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: 
movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload @@ -3004,109 +2943,87 @@ ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: adcl $0, %edi ; X32-NEXT: adcl $0, %ebx -; X32-NEXT: movl %edx, %eax -; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl $0, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %eax, %edx -; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edi, %esi +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movzbl %al, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), 
%edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: addl %edx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %edi, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ecx, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ebx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %eax, %esi +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %ecx, %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -3127,77 +3044,54 @@
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: adcl $0, %edx
-; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl $0, %eax
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
@@ -3214,37 +3108,35 @@
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %eax, %ecx
-; X32-NEXT: movl $0, %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: addl %edx, %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -3264,38 +3156,37 @@
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %edi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl (%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: movl (%esp), %edx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
 ; X32-NEXT: adcl %esi, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
 ; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
@@ -3319,15 +3210,14 @@
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
@@ -3337,118 +3227,113 @@
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: addl %edx, %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl %esi, %ebx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl %ecx, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %edx, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx #
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %ecx
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl %edi, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %ebx, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -3462,37 +3347,35 @@
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -3500,18 +3383,17 @@
 ; X32-NEXT: adcl $0, %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
@@ -3555,14 +3437,13 @@
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
@@ -3612,136 +3493,120 @@
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
 ; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %eax
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
 ; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) #
+; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %edx, %ecx
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %esi, %edx
 ; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %ecx
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl %edi, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %ebx, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -3760,16 +3625,15 @@
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %edi, %edx
 ; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0,
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
@@ -3786,79 +3650,80 @@
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: addl %eax, %edx
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl %edx, %edi
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: movl %ecx, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -3877,35 +3742,35 @@
 ; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: addl %edx, %eax
 ; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
+; X32-NEXT: setb %dl
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzbl %dl, %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %ecx, %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -3925,44 +3790,43 @@
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl %ecx, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ecx, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: adcl %esi, %edi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
@@ -3986,15 +3850,14 @@
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
@@ -4007,10 +3870,10 @@
 ; X32-NEXT: addl %edx, %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: adcl %ebx, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4025,116 +3888,107 @@
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: adcl $0, %esi
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %ecx, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl %ebx, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
@@ -4156,10 +4010,10 @@
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
@@ -4168,45 +4022,46 @@
 ; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl $0, %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: sbbl %edi, %edi
-; X32-NEXT: andl $1, %edi
+; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: setb %cl
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movzbl %cl, %ecx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4216,15 +4071,16 @@
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi,
{{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill @@ -4236,25 +4092,21 @@ ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload @@ -4264,6 +4116,11 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 
4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill @@ -4278,13 +4135,13 @@ ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload @@ -4292,10 +4149,6 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload @@ -4304,6 +4157,10 @@ ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill @@ -4312,67 +4169,66 @@ ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl 16(%ebp), %ebx -; X32-NEXT: movl %ecx, 4(%ebx) -; X32-NEXT: movl 16(%ebp), %ecx -; X32-NEXT: movl %eax, (%ecx) +; X32-NEXT: movl 16(%ebp), %edx +; X32-NEXT: movl %ecx, 4(%edx) +; X32-NEXT: movl %eax, (%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 8(%ecx) +; X32-NEXT: movl %eax, 8(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 12(%ecx) +; X32-NEXT: movl %eax, 12(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 16(%ecx) +; X32-NEXT: movl %eax, 16(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 20(%ecx) +; X32-NEXT: movl %eax, 20(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 24(%ecx) +; X32-NEXT: movl %eax, 24(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 28(%ecx) +; X32-NEXT: movl %eax, 28(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 32(%ecx) +; X32-NEXT: movl %eax, 32(%edx) ; 
X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 36(%ecx) +; X32-NEXT: movl %eax, 36(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 40(%ecx) +; X32-NEXT: movl %eax, 40(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 44(%ecx) +; X32-NEXT: movl %eax, 44(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 48(%ecx) +; X32-NEXT: movl %eax, 48(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 52(%ecx) +; X32-NEXT: movl %eax, 52(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 56(%ecx) +; X32-NEXT: movl %eax, 56(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 60(%ecx) +; X32-NEXT: movl %eax, 60(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 64(%ecx) +; X32-NEXT: movl %eax, 64(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 68(%ecx) +; X32-NEXT: movl %eax, 68(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 72(%ecx) +; X32-NEXT: movl %eax, 72(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 76(%ecx) +; X32-NEXT: movl %eax, 76(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 80(%ecx) +; X32-NEXT: movl %eax, 80(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 84(%ecx) +; X32-NEXT: movl %eax, 84(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 88(%ecx) +; X32-NEXT: movl %eax, 88(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 92(%ecx) +; X32-NEXT: movl %eax, 92(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 96(%ecx) -; X32-NEXT: movl %edx, 100(%ecx) +; X32-NEXT: movl %eax, 96(%edx) +; X32-NEXT: movl %esi, 100(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 104(%ecx) -; X32-NEXT: movl %esi, 108(%ecx) +; X32-NEXT: movl %eax, 104(%edx) +; X32-NEXT: movl %edi, 108(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 112(%ecx) -; X32-NEXT: movl %edi, 116(%ecx) +; X32-NEXT: movl %eax, 112(%edx) +; X32-NEXT: movl %ebx, 116(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 120(%ecx) +; X32-NEXT: movl %eax, 120(%edx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: movl %eax, 124(%ecx) +; X32-NEXT: movl %eax, 124(%edx) ; X32-NEXT: leal -12(%ebp), %esp ; X32-NEXT: popl %esi ; X32-NEXT: popl %edi @@ -4390,40 +4246,41 @@ ; X64-NEXT: pushq %rbx ; X64-NEXT: subq $352, %rsp # imm = 0x160 ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq 48(%rdi), %r8 -; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq 40(%rdi), %rcx -; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 48(%rdi), %r9 +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 40(%rdi), %rbp +; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq 32(%rdi), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdi, %r13 -; X64-NEXT: xorl %r9d, %r9d -; X64-NEXT: mulq %r9 -; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdi, %r10 +; X64-NEXT: xorl %r8d, 
%r8d +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %rbx, %rcx +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rdi, %rbx ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: addq %r11, %rcx -; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbx, %rbp -; X64-NEXT: movq %rbx, %rcx -; X64-NEXT: sbbq %rbx, %rbx -; X64-NEXT: andl $1, %ebx +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, %r11 +; X64-NEXT: adcq %rdi, %rbp +; X64-NEXT: setb %bl +; X64-NEXT: movzbl %bl, %ebx ; X64-NEXT: addq %rax, %rbp ; X64-NEXT: adcq %rdx, %rbx -; X64-NEXT: movq %r8, %rax -; X64-NEXT: mulq %r9 +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r11, %r12 +; X64-NEXT: movq %r11, %r8 ; X64-NEXT: addq %rax, %r12 -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %rcx, %r8 -; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill ; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: addq %rbp, %r12 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill @@ -4433,186 +4290,182 @@ ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: xorl %ebp, %ebp ; X64-NEXT: mulq %rbp -; X64-NEXT: movq %rax, %r10 +; X64-NEXT: movq %rax, %rdi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq 8(%rsi), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rbp -; X64-NEXT: xorl %r9d, %r9d +; X64-NEXT: xorl %r11d, %r11d ; X64-NEXT: movq %rax, %r15 ; X64-NEXT: addq %rcx, %r15 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: addq %r10, %r15 +; X64-NEXT: addq %rdi, %r15 ; X64-NEXT: adcq %rcx, %rbp -; X64-NEXT: movq %rcx, %rdi -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: sbbq %rbx, %rbx -; X64-NEXT: andl $1, %ebx +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: setb %bl ; X64-NEXT: addq %rax, %rbp +; X64-NEXT: movzbl %bl, %ebx ; X64-NEXT: adcq %rdx, %rbx ; X64-NEXT: movq 16(%rsi), %rax -; X64-NEXT: movq %rsi, %r14 -; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rsi, %r13 +; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r9 +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r10, %rcx -; X64-NEXT: movq %rcx, %rsi -; X64-NEXT: addq %rax, %rsi -; X64-NEXT: movq %rdi, %r9 -; X64-NEXT: adcq %rdx, %r9 -; X64-NEXT: addq %rbp, %rsi -; X64-NEXT: movq %rsi, %r10 -; X64-NEXT: adcq %rbx, %r9 -; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r11, %rax -; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rdi, %r8 -; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r13, %rbp -; X64-NEXT: movq (%rbp), %rax +; X64-NEXT: movq %rdi, %r14 +; X64-NEXT: addq %rax, %r14 +; X64-NEXT: movq %rcx, %r11 +; X64-NEXT: adcq %rdx, %r11 +; X64-NEXT: addq %rbp, %r14 +; X64-NEXT: adcq %rbx, %r11 +; X64-NEXT: movq %r8, %rax +; X64-NEXT: movq %r8, %rbp +; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %rdi, %rax +; X64-NEXT: movq %r9, %rax +; X64-NEXT: adcq %rcx, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 
8-byte Spill +; X64-NEXT: movq (%r10), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: xorl %r8d, %r8d ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rax +; X64-NEXT: addq %rdi, %rax +; X64-NEXT: movq %rdi, %r9 ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: adcq %rdi, %rax +; X64-NEXT: adcq %rcx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq 32(%r14), %rax +; X64-NEXT: movq 32(%r13), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r8 +; X64-NEXT: xorl %r8d, %r8d ; X64-NEXT: movq %rax, %r13 -; X64-NEXT: movq %rdx, (%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %rbx, %r14 +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: addq %r13, %rax ; X64-NEXT: movq %rsi, %rax ; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: addq %rcx, %r11 -; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rcx, %r8 -; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: addq %r9, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: adcq %r15, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r10, %r12 -; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r10, %rcx +; X64-NEXT: adcq %r14, %r12 +; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: adcq %r9, %rax +; X64-NEXT: adcq %r11, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r9, %r10 -; X64-NEXT: movq 8(%rbp), %rax +; X64-NEXT: movq %r11, %rdi +; X64-NEXT: movq 8(%r10), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rbp, %rdi -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: xorl %edx, %edx -; X64-NEXT: mulq %rdx -; X64-NEXT: xorl %r9d, %r9d -; X64-NEXT: movq %rax, %r12 -; X64-NEXT: addq %rsi, %r12 +; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: addq %rsi, %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: addq %r14, %r12 +; X64-NEXT: addq %rcx, %r11 ; X64-NEXT: adcq %rsi, %rbp ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: sbbq %rbx, %rbx -; X64-NEXT: andl $1, %ebx +; X64-NEXT: setb %bl ; X64-NEXT: addq %rax, %rbp +; X64-NEXT: movzbl %bl, %ebx ; X64-NEXT: adcq %rdx, %rbx -; X64-NEXT: movq 16(%rdi), %rax +; X64-NEXT: movq 16(%r10), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r9 -; X64-NEXT: xorl %edi, %edi +; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, %r8 +; X64-NEXT: addq %rax, %r8 +; X64-NEXT: movq %rsi, %r10 +; X64-NEXT: adcq %rdx, %r10 +; X64-NEXT: addq %rbp, %r8 +; X64-NEXT: movq %r8, %rax +; X64-NEXT: adcq %rbx, %r10 +; X64-NEXT: movq %rcx, %rdx +; X64-NEXT: movq %rcx, %r12 +; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %r9, %rdx ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r14, %r9 -; X64-NEXT: addq %rax, %r9 -; X64-NEXT: adcq %rdx, %rsi -; X64-NEXT: addq %rbp, %r9 -; X64-NEXT: movq %r9, %rdx -; 
X64-NEXT: adcq %rbx, %rsi -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: movq %r14, %rsi -; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: addq %r8, %rsi -; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r12, %r15 +; X64-NEXT: movq %r11, %r8 +; X64-NEXT: adcq %r8, %r15 ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdx, %r15 -; X64-NEXT: adcq %rax, %r10 -; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: adcq %rax, %r14 +; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: adcq %r10, %rdi +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq 40(%rsi), %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %rdi -; X64-NEXT: xorl %r8d, %r8d -; X64-NEXT: movq %rax, %rcx -; X64-NEXT: movq (%rsp), %rdi # 8-byte Reload -; X64-NEXT: addq %rdi, %rcx +; X64-NEXT: xorl %r14d, %r14d +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rax, %rdi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: addq %r9, %rdi ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: addq %r13, %rcx -; X64-NEXT: adcq %rdi, %rbp -; X64-NEXT: sbbq %rbx, %rbx -; X64-NEXT: andl $1, %ebx +; X64-NEXT: addq %r13, %rdi +; X64-NEXT: adcq %r9, %rbp +; X64-NEXT: setb %bl ; X64-NEXT: addq %rax, %rbp -; X64-NEXT: adcq %rdx, %rbx +; X64-NEXT: movzbl %bl, %r11d +; X64-NEXT: adcq %rdx, %r11 ; X64-NEXT: movq 48(%rsi), %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r13, %r8 -; X64-NEXT: addq %rax, %r8 -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: adcq %rdx, %rax -; X64-NEXT: addq %rbp, %r8 -; X64-NEXT: adcq %rbx, %rax -; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: addq %r13, %r14 -; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rcx, %r12 +; X64-NEXT: movq %r13, %rbx +; X64-NEXT: addq %rax, %rbx +; X64-NEXT: movq %r9, %rsi +; X64-NEXT: adcq %rdx, %rsi +; X64-NEXT: addq %rbp, %rbx +; X64-NEXT: adcq %r11, %rsi +; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %r13, %r12 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r8, %r15 -; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rax, %r10 +; X64-NEXT: adcq %rdi, %r8 +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rbx, %rcx +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rsi, %r10 ; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rax, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; X64-NEXT: movq %rdx, %rax ; X64-NEXT: addq %r13, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: adcq %rdi, %rax +; X64-NEXT: movq (%rsp), %rax # 8-byte Reload +; X64-NEXT: adcq %r9, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rdx, %rax ; X64-NEXT: addq %r13, %rax ; X64-NEXT: 
movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload -; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload -; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq 56(%rax), %r11 ; X64-NEXT: movq %r11, %rax +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: movq %rdi, %r10 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rsi, %rbx @@ -4621,15 +4474,15 @@ ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: addq %rbx, %r10 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rbx, %r8 ; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx +; X64-NEXT: setb %cl ; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r14 +; X64-NEXT: movq %rdi, %r11 ; X64-NEXT: addq %rsi, %rax +; X64-NEXT: movzbl %cl, %ecx ; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload ; X64-NEXT: addq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload @@ -4637,299 +4490,297 @@ ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload ; X64-NEXT: addq %rax, %r15 ; X64-NEXT: adcq %rdx, %r12 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r10, %rbp +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %r9, %rcx -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %rcx +; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %r9, %rbx +; X64-NEXT: addq %rsi, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r10 +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rbp, %rcx -; X64-NEXT: sbbq %rbp, %rbp -; X64-NEXT: andl $1, %ebp -; X64-NEXT: movq %rsi, %rbx -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: mulq %r14 +; X64-NEXT: setb %bl +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rax, %rsi ; X64-NEXT: addq %rcx, %rsi -; X64-NEXT: adcq %rbp, %r13 -; X64-NEXT: addq 
{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movzbl %bl, %eax +; X64-NEXT: adcq %rax, %r13 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload -; X64-NEXT: addq %r8, %rsi -; X64-NEXT: adcq %r10, %r13 +; X64-NEXT: addq %r9, %rsi +; X64-NEXT: adcq %r8, %r13 ; X64-NEXT: adcq $0, %r15 ; X64-NEXT: adcq $0, %r12 -; X64-NEXT: movq %rdi, %rbp -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: mulq %r9 +; X64-NEXT: movq %r10, %rbx +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %rbx, %r14 -; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rdi -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp ; X64-NEXT: adcq $0, %rdi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq 24(%rax), %rcx -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbx, %rax ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %rbp -; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, %rbx +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r8 -; X64-NEXT: addq %rbx, %r8 +; X64-NEXT: addq %rbp, %r8 ; X64-NEXT: adcq %rdi, %rcx -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: setb %dil +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rbx ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rdi, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movzbl %dil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; X64-NEXT: adcq %r14, %rbp -; X64-NEXT: addq %rax, %rbx -; X64-NEXT: adcq %rdx, %rbp +; X64-NEXT: addq %r14, %rbp +; X64-NEXT: movq (%rsp), %rbx # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: adcq %r9, %rbx +; X64-NEXT: addq %rax, %rbp +; X64-NEXT: adcq %rdx, %rbx ; X64-NEXT: addq %rsi, %r10 ; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %r13, %r8 ; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: addq %r15, %rbx -; X64-NEXT: adcq %r12, %rbp -; X64-NEXT: movl $0, %r8d -; X64-NEXT: adcq $0, %r8 -; X64-NEXT: sbbq %r10, %r10 -; X64-NEXT: andl $1, %r10d +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: addq %r15, %rbp +; X64-NEXT: adcq %r12, %rbx +; X64-NEXT: setb %r15b ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r9, %rsi +; X64-NEXT: movq %r11, %rsi ; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %rax, %r15 -; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r11, %rax +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: movq %r12, %rax ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %r9, %rdi +; X64-NEXT: addq %r11, %rdi ; X64-NEXT: adcq $0, %rsi ; 
X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %rax, %r13 -; X64-NEXT: addq %rdi, %r13 -; X64-NEXT: adcq %rsi, %r9 -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r11, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %r12 -; X64-NEXT: addq %r9, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: addq %rdi, %r11 +; X64-NEXT: adcq %rsi, %rcx +; X64-NEXT: setb %sil +; X64-NEXT: movq %r12, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %r8, %r12 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: adcq %r14, %rsi +; X64-NEXT: addq %r14, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: adcq %r9, %r14 ; X64-NEXT: addq %rax, %rcx -; X64-NEXT: adcq %rdx, %rsi -; X64-NEXT: addq %rbx, %r15 -; X64-NEXT: adcq %rbp, %r13 -; X64-NEXT: adcq %r8, %rcx -; X64-NEXT: adcq %r10, %rsi -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload +; X64-NEXT: adcq %rdx, %r14 +; X64-NEXT: addq %rbp, %r13 +; X64-NEXT: adcq %rbx, %r11 +; X64-NEXT: movzbl %r15b, %eax +; X64-NEXT: adcq %rax, %rcx +; X64-NEXT: adcq $0, %r14 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload ; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rax, %r9 -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: movq 24(%rax), %rbp -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq 24(%rax), %rcx +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rsi, %r14 +; X64-NEXT: movq %rsi, %r11 ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rbx, %rbp ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq 
%rbx, %r15 -; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r11 -; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: addq %rbp, %r15 +; X64-NEXT: adcq %rsi, %rbx +; X64-NEXT: setb %sil +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r9 +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload ; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload ; X64-NEXT: addq %rax, %r8 ; X64-NEXT: adcq %rdx, %r10 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %r14, %rsi -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r11, %rbp +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx -; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r11 -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rdi, %rbx +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r11 +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rbx, %rbx -; X64-NEXT: andl $1, %ebx -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %r11 -; X64-NEXT: movq %rdx, %r11 -; X64-NEXT: movq %rax, %rsi -; X64-NEXT: addq %rcx, %rsi -; X64-NEXT: adcq %rbx, %r11 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload -; X64-NEXT: addq %r9, %rsi -; X64-NEXT: adcq %r15, %r11 +; X64-NEXT: adcq %rbp, %rdi +; X64-NEXT: setb %cl +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rsi, %rbp +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rdi, %rbx +; X64-NEXT: movzbl %cl, %eax +; X64-NEXT: adcq %rax, %rsi +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: addq %r14, %rbx +; X64-NEXT: adcq %r15, %rsi ; X64-NEXT: adcq $0, %r8 ; X64-NEXT: adcq $0, %r10 -; X64-NEXT: movq %rdi, %rbx -; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %r11, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r9 -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r14 ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdi, %r15 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rcx, %rbp ; X64-NEXT: adcq $0, %rdi -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %r12, %r13 -; X64-NEXT: mulq %r13 +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %r12 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbp, %rax -; X64-NEXT: movq %rax, %rbp +; X64-NEXT: movq %rax, %r11 ; 
X64-NEXT: adcq %rdi, %rcx -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi +; X64-NEXT: setb %dil ; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %r13 +; X64-NEXT: mulq %r12 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rdi, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: movzbl %dil, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: addq %r13, %rdi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; X64-NEXT: addq %r14, %rbx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: addq %rax, %rbx -; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: addq %rsi, %r9 +; X64-NEXT: adcq %r14, %rbp +; X64-NEXT: addq %rax, %rdi +; X64-NEXT: adcq %rdx, %rbp +; X64-NEXT: addq %rbx, %r9 ; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r11, %rbp -; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, %rbx -; X64-NEXT: adcq $0, %rcx -; X64-NEXT: addq %r8, %rbx -; X64-NEXT: adcq %r10, %rcx -; X64-NEXT: movl $0, %r12d -; X64-NEXT: adcq $0, %r12 -; X64-NEXT: sbbq %r9, %r9 -; X64-NEXT: andl $1, %r9d -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: adcq %rsi, %r11 +; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %r8, %rdi +; X64-NEXT: adcq %r10, %rbp +; X64-NEXT: setb %r9b +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rdx, %r10 ; X64-NEXT: movq %rax, %r11 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: movq %r8, %rax ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %r8, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r10, %rbx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %r13 -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r12 +; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq %rdi, %r15 -; X64-NEXT: adcq %rsi, %r8 -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r10, %rax -; X64-NEXT: movq %r10, %rbp -; X64-NEXT: mulq %r13 -; X64-NEXT: addq %r8, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: addq %rbx, %r15 +; X64-NEXT: adcq %rsi, %rcx +; X64-NEXT: setb %bl +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r12 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movzbl %bl, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; X64-NEXT: movq %r10, %rsi -; X64-NEXT: addq %r14, %rsi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload -; X64-NEXT: movq %r8, %rdi -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: addq %rax, %rsi -; X64-NEXT: adcq %rdx, %rdi -; X64-NEXT: addq %rbx, %r11 -; X64-NEXT: adcq %rcx, %r15 -; X64-NEXT: adcq %r12, %rsi -; X64-NEXT: adcq %r9, %rdi +; X64-NEXT: movq %r10, %rcx +; X64-NEXT: addq %r13, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: movq %rbx, %rsi +; X64-NEXT: movq %rbx, %r12 +; X64-NEXT: adcq %r14, %rsi +; X64-NEXT: addq %rax, %rcx +; X64-NEXT: adcq %rdx, %rsi +; X64-NEXT: 
addq %rdi, %r11 +; X64-NEXT: adcq %rbp, %r15 +; X64-NEXT: movzbl %r9b, %eax +; X64-NEXT: adcq %rax, %rcx +; X64-NEXT: adcq $0, %rsi ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload ; X64-NEXT: addq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload ; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq $0, {{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax @@ -4937,9 +4788,10 @@ ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %r14 +; X64-NEXT: movq %r8, %rbp ; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %r13 +; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rax, %rcx ; X64-NEXT: addq %rsi, %rcx @@ -4948,291 +4800,279 @@ ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %rcx, %r11 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rcx, %r8 ; X64-NEXT: adcq %rbx, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx +; X64-NEXT: setb %cl ; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdi, %r15 ; X64-NEXT: addq %rsi, %rax +; X64-NEXT: movzbl %cl, %ecx ; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq %r10, %r9 -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload -; X64-NEXT: movq %r8, %r12 -; X64-NEXT: adcq (%rsp), %r12 # 8-byte Folded Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload +; X64-NEXT: movq %r12, %r10 +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload ; X64-NEXT: addq %rax, %r9 -; X64-NEXT: adcq %rdx, %r12 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r13, %rbp -; X64-NEXT: mulq %rbp -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: adcq %rdx, %r10 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: movq %r13, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %r8, %rbx +; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %rcx, %rbp +; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: 
addq %rbx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r13, %rax +; X64-NEXT: setb %sil +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rdx, %r15 ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rcx, %rbx -; X64-NEXT: adcq %rsi, %r8 +; X64-NEXT: movzbl %sil, %eax +; X64-NEXT: adcq %rax, %r15 ; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload ; X64-NEXT: addq %r14, %rbx -; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: adcq %r8, %r15 ; X64-NEXT: adcq $0, %r9 -; X64-NEXT: adcq $0, %r12 +; X64-NEXT: adcq $0, %r10 ; X64-NEXT: movq %rbp, %rsi ; X64-NEXT: movq %rsi, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r14 -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq %r13, %rax +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r8 ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rcx ; X64-NEXT: addq %r14, %rcx -; X64-NEXT: adcq $0, %rdi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: movq 56(%rax), %r15 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq 56(%rax), %rdi ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %r15 +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: addq %rcx, %rax -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: adcq %rdi, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx -; X64-NEXT: movq %r13, %rax -; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: addq %rcx, %r14 +; X64-NEXT: adcq %rbp, %rsi +; X64-NEXT: setb %cl +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r8 ; X64-NEXT: addq %rsi, %rax +; X64-NEXT: movzbl %cl, %ecx ; X64-NEXT: adcq %rcx, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: addq %r13, %rdi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: addq %r11, %rcx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: addq %rax, %rdi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: adcq %r13, %rsi +; X64-NEXT: addq %rax, %rcx ; X64-NEXT: adcq %rdx, %rsi -; X64-NEXT: addq %rbx, %r10 -; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r8, %r11 -; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, %rdi +; X64-NEXT: addq %rbx, %r12 +; X64-NEXT: adcq %r15, %r14 +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: addq %r9, %rdi -; X64-NEXT: adcq %r12, %rsi -; X64-NEXT: movl $0, %r14d -; X64-NEXT: adcq $0, %r14 -; X64-NEXT: sbbq %r10, %r10 -; X64-NEXT: andl $1, %r10d -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r8 -; X64-NEXT: movq %rax, %r12 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: movq %r9, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r11 -; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %r8, 
%rcx -; X64-NEXT: adcq $0, %r11 -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: addq %r9, %rcx +; X64-NEXT: adcq %r10, %rsi +; X64-NEXT: setb {{[0-9]+}}(%rsp) # 1-byte Folded Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: movq %r10, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r15 ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx -; X64-NEXT: adcq %r11, %r8 -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx -; X64-NEXT: movq %r9, %rax -; X64-NEXT: mulq %r15 -; X64-NEXT: addq %r8, %rax -; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: addq %r9, %rbx +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %r8, %rdi +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rbx, %r8 +; X64-NEXT: adcq %r15, %r9 +; X64-NEXT: setb %bl +; X64-NEXT: movq %r10, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: addq %r9, %rax +; X64-NEXT: movzbl %bl, %edi +; X64-NEXT: adcq %rdi, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload -; X64-NEXT: addq %r13, %r15 -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload -; X64-NEXT: adcq %rbp, %r11 +; X64-NEXT: addq %r11, %r15 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: adcq %r13, %rbp ; X64-NEXT: addq %rax, %r15 -; X64-NEXT: adcq %rdx, %r11 -; X64-NEXT: addq %rdi, %r12 -; X64-NEXT: adcq %rsi, %rbx -; X64-NEXT: adcq %r14, %r15 -; X64-NEXT: adcq %r10, %r11 -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq %rdx, %rbp +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; X64-NEXT: addq %rcx, %rdx +; X64-NEXT: adcq %rsi, %r8 +; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload +; X64-NEXT: adcq %rax, %r15 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: addq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: adcq $0, %r12 -; X64-NEXT: adcq $0, %rbx -; X64-NEXT: adcq $0, %r15 -; X64-NEXT: adcq $0, %r11 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: adcq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload ; X64-NEXT: movq %r12, 
{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload -; X64-NEXT: movl $0, %eax -; X64-NEXT: adcq $0, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movl $0, %eax -; X64-NEXT: adcq $0, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movl $0, %eax -; X64-NEXT: adcq $0, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: sbbq %rax, %rax -; X64-NEXT: andl $1, %eax -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rdx +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdx, %r8 -; X64-NEXT: movq %rax, %r13 -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r14 -; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %r8, %rbp -; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %r10 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r11, %rbx +; X64-NEXT: adcq $0, %rdi ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload -; X64-NEXT: mulq %r8 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r12 -; X64-NEXT: addq %rbp, %r12 -; X64-NEXT: adcq %rbx, %rcx -; X64-NEXT: sbbq %rbp, %rbp -; X64-NEXT: andl $1, %ebp -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: addq %rbx, %r12 +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: setb %bl +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %r9 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rbp, %rdx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload -; X64-NEXT: movq (%rsp), %r10 # 8-byte Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload -; X64-NEXT: addq %rax, %r9 -; X64-NEXT: adcq %rdx, %r10 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %r14, %rbx -; X64-NEXT: mulq %rbx -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %rbx -; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq 
%rax, %rbp -; X64-NEXT: addq %rcx, %rbp -; X64-NEXT: adcq $0, %rbx -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, %r14 -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movzbl %bl, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: addq %rax, %r8 +; X64-NEXT: adcq %rdx, %rcx +; X64-NEXT: movq %rcx, %r14 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r10, %rdi +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbx, %rcx -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rcx, %rbp -; X64-NEXT: adcq %rdi, %rbx -; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: addq %r13, %rbp -; X64-NEXT: adcq %r12, %rbx -; X64-NEXT: adcq $0, %r9 -; X64-NEXT: movq %r9, %r12 -; X64-NEXT: adcq $0, %r10 -; X64-NEXT: movq %r10, %r8 -; X64-NEXT: movq %r14, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r11, %rbx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r13 +; X64-NEXT: mulq %r9 ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: setb %bl ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: movq %rsi, %r10 -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, %rdi ; X64-NEXT: addq %rcx, %rdi -; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movzbl %bl, %eax +; X64-NEXT: adcq %rax, %r11 +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: adcq %r12, %r11 +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %r14 +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r13, %rbx +; X64-NEXT: movq %rbx, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r14 -; X64-NEXT: addq %rdi, %rax -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: adcq %rsi, %r14 -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rsi, %r9 ; X64-NEXT: mulq %rcx -; X64-NEXT: addq %r14, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq %rcx, %r10 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %r8, %rcx +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: adcq %rsi, %rbx +; X64-NEXT: setb %cl +; 
X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %r13, %r9 +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movzbl %cl, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: addq %r13, %rsi +; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; X64-NEXT: addq %r14, %rsi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: adcq %r13, %rcx +; X64-NEXT: adcq %r14, %rcx ; X64-NEXT: addq %rax, %rsi ; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: addq %rbp, %r9 -; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbx, %rdi -; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %rdi, %r12 +; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: movq %r8, %r11 ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: adcq $0, %rcx -; X64-NEXT: addq %r12, %rsi +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r8, %rcx -; X64-NEXT: movq %rcx, %r12 -; X64-NEXT: movl $0, %r10d -; X64-NEXT: adcq $0, %r10 -; X64-NEXT: sbbq %r9, %r9 -; X64-NEXT: andl $1, %r9d +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, (%rsp) # 8-byte Spill +; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload ; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %r10, %rsi ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill @@ -5244,82 +5084,85 @@ ; X64-NEXT: addq %rcx, %rdi ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: mulq %rbp +; X64-NEXT: mulq %r9 ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rdi, %rbx +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rdi, %r10 ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi +; X64-NEXT: setb %bl ; X64-NEXT: movq %r8, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: mulq %r9 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %bl, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq %r14, %rsi +; X64-NEXT: addq %r13, %rsi ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %r13, %rcx +; X64-NEXT: adcq %r14, %rcx ; X64-NEXT: addq %rax, %rsi ; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload -; X64-NEXT: adcq %r12, %rbx -; X64-NEXT: adcq %r10, %rsi -; X64-NEXT: adcq %r9, %rcx -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload +; X64-NEXT: adcq %rax, %rsi +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded 
Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: addq %rax, (%rsp) # 8-byte Folded Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: adcq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: adcq %r15, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: adcq %r11, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload -; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: adcq %rax, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq %r15, %r12 +; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rbp, %r11 +; X64-NEXT: movq %r11, (%rsp) # 8-byte Spill +; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload +; X64-NEXT: adcq %rax, %r14 +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %r10 +; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: movq 64(%r9), %r11 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq 64(%rcx), %r11 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %r11 -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: addq %rsi, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq 72(%r9), %rcx +; X64-NEXT: movq 72(%rcx), %rsi ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %rsi -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %rcx +; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %r8 ; X64-NEXT: addq %rbx, %r8 -; X64-NEXT: adcq %rbp, %rcx -; X64-NEXT: sbbq %rbp, %rbp -; X64-NEXT: andl $1, %ebp -; X64-NEXT: movq %r10, %rax -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rsi, %r10 +; X64-NEXT: adcq %rbp, %rsi +; X64-NEXT: setb %bl +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rcx, %r10 ; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %rcx, %rdi -; X64-NEXT: adcq %rbp, %rsi +; X64-NEXT: addq %rsi, %rdi +; X64-NEXT: movzbl %bl, %eax +; X64-NEXT: adcq %rax, %rcx ; X64-NEXT: movq %r11, %rax -; X64-NEXT: xorl %ecx, %ecx -; X64-NEXT: mulq %rcx +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: mulq %rdx ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload @@ -5327,7 +5170,7 @@ ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload ; X64-NEXT: adcq %r14, %r15 ; 
X64-NEXT: addq %rdi, %r12 -; X64-NEXT: adcq %rsi, %r15 +; X64-NEXT: adcq %rcx, %r15 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %r11, %rsi @@ -5335,8 +5178,8 @@ ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rdi @@ -5349,165 +5192,159 @@ ; X64-NEXT: addq %rdi, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: movq %rbp, %rdi +; X64-NEXT: setb %sil +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r10 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: adcq %r13, %r14 +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload ; X64-NEXT: addq %rax, %rbx ; X64-NEXT: adcq %rdx, %r14 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: addq %r13, %rbx ; X64-NEXT: adcq %r8, %r14 ; X64-NEXT: adcq $0, %r12 ; X64-NEXT: adcq $0, %r15 -; X64-NEXT: movq %r9, %rbp -; X64-NEXT: movq 80(%rbp), %r8 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq 80(%rbp), %rdi ; X64-NEXT: movq %r11, %rax -; X64-NEXT: movq %r11, %r9 -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rdx, %r10 -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %r10, %rcx +; X64-NEXT: addq %r8, %rcx ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq 88(%rbp), %r10 -; X64-NEXT: movq %r9, %rax +; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: movq %rax, %r9 -; X64-NEXT: addq %rcx, %r9 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rcx, %r8 ; X64-NEXT: adcq %rsi, %rbp -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: setb %r11b +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r10 -; X64-NEXT: movq %rdx, %rdi -; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %rbp, %rcx -; X64-NEXT: adcq %rsi, %rdi -; X64-NEXT: movq %r8, %rax +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %rbp, %rsi +; X64-NEXT: movzbl %r11b, %eax +; X64-NEXT: adcq %rax, %rcx +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: mulq %rdx -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq %rax, %rsi -; X64-NEXT: movq %r13, %rax +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: addq %r9, %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: adcq %rdx, %rax -; X64-NEXT: movq %rdx, %r13 -; X64-NEXT: addq %rcx, %rsi -; X64-NEXT: adcq %rdi, %rax -; X64-NEXT: addq %rbx, %r11 -; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r14, %r9 -; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill 
-; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %rsi, %rbp +; X64-NEXT: adcq %rcx, %rax +; X64-NEXT: addq %rbx, %r13 +; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r14, %r8 +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rbp ; X64-NEXT: adcq $0, %rax -; X64-NEXT: addq %r12, %rsi -; X64-NEXT: movq %rsi, %r14 +; X64-NEXT: addq %r12, %rbp +; X64-NEXT: movq %rbp, %r8 ; X64-NEXT: adcq %r15, %rax -; X64-NEXT: movq %rax, %r9 -; X64-NEXT: movl $0, %r12d -; X64-NEXT: adcq $0, %r12 -; X64-NEXT: sbbq %r11, %r11 -; X64-NEXT: andl $1, %r11d -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %r15 +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: setb %r14b +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r15 +; X64-NEXT: movq %rax, %r12 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: addq %r15, %rbx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax -; X64-NEXT: movq %rax, %rdi +; X64-NEXT: movq %rax, %rbx ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi +; X64-NEXT: setb %sil ; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %r10 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: addq %r9, %rsi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %r13, %rcx +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload ; X64-NEXT: addq %rax, %rsi ; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: addq %r14, %r15 -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r9, %rdi -; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r12, %rsi +; X64-NEXT: addq %r8, %r12 +; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r11, %rbx +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movzbl %r14b, %eax +; X64-NEXT: adcq %rax, %rsi ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r11, %rcx +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: imulq %rax, %r10 -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rax, %r8 ; X64-NEXT: addq %r10, %rdx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; X64-NEXT: imulq %r10, %r8 -; X64-NEXT: addq %rdx, %r8 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: imulq %rbp, %rdi +; X64-NEXT: addq %rdx, %rdi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq %rax, %rsi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload -; X64-NEXT: imulq %rbx, %rsi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: mulq %rbp -; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: imulq %r11, %rsi +; 
X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rax, %r9 ; X64-NEXT: addq %rsi, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: imulq %rbp, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: imulq %rcx, %rax ; X64-NEXT: addq %rdx, %rax -; X64-NEXT: addq %r9, %r11 -; X64-NEXT: adcq %r8, %rax -; X64-NEXT: movq %rax, %r14 -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %rdi +; X64-NEXT: addq %r8, %r9 +; X64-NEXT: adcq %rdi, %rax +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %rdi +; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %rbx, %r8 -; X64-NEXT: mulq %rdi +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: movq %r10, %rbp +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %r15 ; X64-NEXT: addq %rbx, %r15 ; X64-NEXT: adcq %rsi, %rdi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx -; X64-NEXT: movq %r8, %rax +; X64-NEXT: setb %cl +; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %r12 -; X64-NEXT: movq %rax, %r9 -; X64-NEXT: addq %rdi, %r9 -; X64-NEXT: adcq %rcx, %r12 -; X64-NEXT: addq %r11, %r9 -; X64-NEXT: adcq %r14, %r12 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: addq %rdi, %r13 +; X64-NEXT: movzbl %cl, %eax +; X64-NEXT: adcq %rax, %r12 +; X64-NEXT: addq %r9, %r13 +; X64-NEXT: adcq %r8, %r12 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload ; X64-NEXT: movq 120(%rdx), %rcx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload ; X64-NEXT: imulq %r10, %rcx @@ -5526,12 +5363,12 @@ ; X64-NEXT: movq %rax, %rcx ; X64-NEXT: imulq %rbx, %rcx ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq %rax, %r9 ; X64-NEXT: addq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: imulq %rdi, %rax ; X64-NEXT: addq %rdx, %rax -; X64-NEXT: addq %r11, %r13 +; X64-NEXT: addq %r11, %r9 ; X64-NEXT: adcq %rsi, %rax ; X64-NEXT: movq %rax, %r11 ; X64-NEXT: movq %rdi, %rax @@ -5540,371 +5377,367 @@ ; X64-NEXT: movq %rax, %r14 ; X64-NEXT: movq %rbx, %rax ; X64-NEXT: mulq %r10 -; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: movq %rax, %rsi -; X64-NEXT: addq %rcx, %rsi -; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %rsi, %rdi -; X64-NEXT: adcq %rbp, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi +; X64-NEXT: addq %rbp, %rdi +; X64-NEXT: adcq %rsi, %rcx +; X64-NEXT: setb %sil ; X64-NEXT: movq %rbx, %rax ; X64-NEXT: mulq %r8 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx -; X64-NEXT: addq %r13, %rax +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: addq %r9, %rax ; X64-NEXT: adcq %r11, %rdx ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload ; X64-NEXT: adcq %r15, %rdi -; X64-NEXT: adcq %r9, %rax +; X64-NEXT: adcq %r13, %rax ; X64-NEXT: adcq %r12, %rdx ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: 
adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq 80(%rsi), %rcx -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload -; X64-NEXT: mulq %r8 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq 80(%rsi), %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq 88(%rsi), %r10 -; X64-NEXT: movq %rsi, %r12 -; X64-NEXT: movq %r10, %rax -; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq 88(%rsi), %rax +; X64-NEXT: movq %rsi, %r9 +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %r9, %rbx +; X64-NEXT: addq %r8, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %rcx, %r9 -; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload ; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: addq %rbx, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: addq %rbx, %r14 +; X64-NEXT: adcq %rbp, %rcx +; X64-NEXT: setb %r8b +; X64-NEXT: movq %rsi, %rax ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rsi, %rbx -; X64-NEXT: adcq %rdi, %rbp -; X64-NEXT: movq %r9, %rax +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: movzbl %r8b, %eax +; X64-NEXT: adcq %rax, %rbp +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r11 -; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: addq %r9, %r10 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: adcq %r13, %r11 -; X64-NEXT: addq %rbx, %r10 -; X64-NEXT: adcq %rbp, %r11 -; X64-NEXT: movq %r12, %rcx -; X64-NEXT: movq 64(%rcx), %rdi -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq 72(%rcx), %r14 -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: addq %r12, %rsi +; X64-NEXT: movq %rdx, %r10 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: adcq %r8, %r10 +; X64-NEXT: addq %rbx, %rsi +; X64-NEXT: adcq 
%rbp, %r10 +; X64-NEXT: movq %r9, %rdi +; X64-NEXT: movq 64(%rdi), %r13 +; X64-NEXT: movq %r13, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq 72(%rdi), %r9 +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rsi, %rbx +; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %r13, %rax ; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rbp, %rcx +; X64-NEXT: setb %r11b +; X64-NEXT: movq %r9, %rax +; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rsi, %rbp -; X64-NEXT: adcq %rcx, %rbx -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: xorl %ecx, %ecx -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r8 -; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq %r15, %r9 +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: movzbl %r11b, %eax +; X64-NEXT: adcq %rax, %rbx ; X64-NEXT: movq %r13, %rax -; X64-NEXT: adcq %r8, %rax -; X64-NEXT: addq %rbp, %r9 -; X64-NEXT: adcq %rbx, %rax -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload -; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; X64-NEXT: movq %rax, %r13 +; X64-NEXT: xorl %ecx, %ecx +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq %rax, %r15 +; X64-NEXT: movq %r12, %rcx +; X64-NEXT: addq %r15, %rcx +; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: addq %rbp, %rcx +; X64-NEXT: adcq %rbx, %r8 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r14, %r8 +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rsi ; X64-NEXT: adcq $0, %r10 -; X64-NEXT: adcq $0, %r11 -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: mulq %rsi +; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r13, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: movq %r14, %rax -; X64-NEXT: movq %r14, %r12 -; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rsi, %r14 -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r8 +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rcx, %rbp -; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %rbp, %rdi -; X64-NEXT: adcq %rsi, %r9 -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r12, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %rbp -; X64-NEXT: addq %r9, %rax -; X64-NEXT: adcq %rsi, %rdx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload -; X64-NEXT: addq %r12, %r15 -; X64-NEXT: adcq 
-{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %r13, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: setb %dil +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rbx +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movzbl %dil, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: addq %r14, %r15 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: adcq %r13, %r11 ; X64-NEXT: addq %rax, %r15 -; X64-NEXT: adcq %rdx, %r8 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r13, %rdi -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rdx, %r11 +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload +; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq $0, %r15 -; X64-NEXT: adcq $0, %r8 -; X64-NEXT: addq %r10, %r15 -; X64-NEXT: adcq %r11, %r8 -; X64-NEXT: movl $0, %r9d -; X64-NEXT: adcq $0, %r9 -; X64-NEXT: sbbq %r13, %r13 -; X64-NEXT: andl $1, %r13d -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r14, %rsi -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %r14 -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rsi, %r11 -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: adcq $0, %r11 +; X64-NEXT: addq %rsi, %r15 +; X64-NEXT: adcq %r10, %r11 +; X64-NEXT: setb %r10b +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %r8, %rdi +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r12 +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %r14, %rbx -; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, %r14 -; X64-NEXT: mulq %rbp +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: setb %r8b +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %rdi ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %r8b, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq %r12, %rsi +; X64-NEXT: addq %r14, %rsi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: adcq %r13, %rcx ; X64-NEXT: addq %rax, %rsi ; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: addq %r15, %r10 -; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r8, %rbx +; X64-NEXT: addq %r15, %r9 +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; 
X64-NEXT: adcq %r11, %rbx ; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r9, %rsi +; X64-NEXT: movzbl %r10b, %eax +; X64-NEXT: adcq %rax, %rsi ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r13, %rcx +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq 96(%rsi), %rcx -; X64-NEXT: imulq %rcx, %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq 96(%rbp), %rcx +; X64-NEXT: imulq %rcx, %rdi ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r11, %rdi -; X64-NEXT: mulq %rdi +; X64-NEXT: movq %r12, %rsi +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rax, %r9 -; X64-NEXT: addq %rbp, %rdx -; X64-NEXT: movq 104(%rsi), %r8 -; X64-NEXT: imulq %r8, %rdi -; X64-NEXT: addq %rdx, %rdi -; X64-NEXT: movq %rdi, %r10 -; X64-NEXT: movq 112(%rsi), %rax -; X64-NEXT: movq %rsi, %rbp -; X64-NEXT: movq %rax, %rdi +; X64-NEXT: addq %rdi, %rdx +; X64-NEXT: movq 104(%rbp), %r8 +; X64-NEXT: imulq %r8, %rsi +; X64-NEXT: addq %rdx, %rsi +; X64-NEXT: movq %rsi, %r11 +; X64-NEXT: movq 112(%rbp), %rax +; X64-NEXT: movq %rbp, %rdi +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: imulq %rbp, %rsi ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rsi, %rdx +; X64-NEXT: movq 120(%rdi), %rdi ; X64-NEXT: imulq %rbx, %rdi -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %rdi, %rdx -; X64-NEXT: movq 120(%rbp), %rdi -; X64-NEXT: imulq %rsi, %rdi ; X64-NEXT: addq %rdx, %rdi -; X64-NEXT: addq %r9, %r11 -; X64-NEXT: adcq %r10, %rdi -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: movq %rsi, %r10 -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %r9, %r10 +; X64-NEXT: adcq %r11, %rdi ; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %rbx, %r9 +; X64-NEXT: movq %rbx, %rsi +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r9 ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rsi, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rbx, %rbp ; X64-NEXT: adcq $0, %rcx -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rsi, %rax ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq %rbx, %r15 +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: addq %rbp, %r12 ; X64-NEXT: adcq %rcx, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx +; X64-NEXT: setb %cl ; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %r8 -; X64-NEXT: addq %rsi, %r8 -; X64-NEXT: adcq %rcx, %rbx -; X64-NEXT: addq %r11, %r8 +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rsi, %rbp +; X64-NEXT: movzbl %cl, %eax +; X64-NEXT: adcq %rax, %rbx +; X64-NEXT: addq %r10, %rbp ; X64-NEXT: adcq %rdi, %rbx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: imulq %rax, %rsi -; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq %rax, %r13 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq %rax, %r8 ; X64-NEXT: addq %rsi, %rdx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 
# 8-byte Reload -; X64-NEXT: imulq %r12, %rcx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: imulq %r11, %rcx ; X64-NEXT: addq %rdx, %rcx -; X64-NEXT: movq %rcx, %rbp +; X64-NEXT: movq %rcx, %r9 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq %rax, %rcx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: imulq %rsi, %rcx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: mulq %rdi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload +; X64-NEXT: imulq %r15, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rax, %r10 ; X64-NEXT: addq %rcx, %rdx -; X64-NEXT: movq %r14, %r13 -; X64-NEXT: imulq %rdi, %r13 -; X64-NEXT: addq %rdx, %r13 -; X64-NEXT: addq %r11, %r10 -; X64-NEXT: adcq %rbp, %r13 -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, %r11 -; X64-NEXT: mulq %r9 -; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: movq %rsi, %r14 -; X64-NEXT: mulq %r9 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: imulq %r14, %rax +; X64-NEXT: addq %rdx, %rax +; X64-NEXT: addq %r8, %r10 +; X64-NEXT: adcq %r9, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r14, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %r15, %rax +; X64-NEXT: mulq %r13 ; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %rbp, %rcx +; X64-NEXT: addq %rdi, %rcx ; X64-NEXT: adcq $0, %r9 -; X64-NEXT: movq %r11, %rax -; X64-NEXT: movq %r12, %rbp -; X64-NEXT: mulq %rbp -; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %rcx, %r11 -; X64-NEXT: adcq %r9, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx ; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %rbp -; X64-NEXT: addq %rsi, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %rcx, %rsi +; X64-NEXT: adcq %r9, %rdi +; X64-NEXT: setb %cl +; X64-NEXT: movq %r15, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: addq %rdi, %rax +; X64-NEXT: movzbl %cl, %ecx ; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: addq %r10, %rax -; X64-NEXT: adcq %r13, %rdx -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: adcq %r15, %r11 -; X64-NEXT: adcq %r8, %rax +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq %r12, %rsi +; X64-NEXT: adcq %rbp, %rax ; X64-NEXT: adcq %rbx, %rdx -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload ; X64-NEXT: movq 
{{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload -; X64-NEXT: addq (%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq %rcx, %r8 -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: movq %rsi, %r9 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, %r9 ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: movq %rdi, %r10 +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: adcq (%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, (%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 8(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 16(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 24(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 32(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 40(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 48(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 56(%rcx) -; X64-NEXT: movq %r8, 64(%rcx) -; X64-NEXT: movq %r9, 72(%rcx) -; X64-NEXT: movq %rbx, 80(%rcx) -; X64-NEXT: movq %rbp, 88(%rcx) -; X64-NEXT: movq %rdi, 96(%rcx) -; X64-NEXT: movq %r11, 104(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, (%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 8(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 16(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 24(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 32(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 40(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 48(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 56(%rcx) +; X64-NEXT: movq %r9, 64(%rcx) +; X64-NEXT: movq %r10, 72(%rcx) +; X64-NEXT: movq %rbp, 80(%rcx) +; X64-NEXT: movq %rbx, 88(%rcx) +; X64-NEXT: movq %r8, 96(%rcx) +; X64-NEXT: movq %rsi, 104(%rcx) ; X64-NEXT: movq %rax, 112(%rcx) ; X64-NEXT: movq %rdx, 120(%rcx) ; 
X64-NEXT: addq $352, %rsp # imm = 0x160 Index: llvm/trunk/test/CodeGen/X86/mul-i256.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mul-i256.ll +++ llvm/trunk/test/CodeGen/X86/mul-i256.ll @@ -138,18 +138,17 @@ ; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: adcl $0, %ebx -; X32-NEXT: xorl %edx, %edx ; X32-NEXT: addl %ecx, %edi ; X32-NEXT: adcl %eax, %ebx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -205,76 +204,70 @@ ; X64-NEXT: pushq %r14 ; X64-NEXT: .Lcfi1: ; X64-NEXT: .cfi_def_cfa_offset 24 -; X64-NEXT: pushq %r12 +; X64-NEXT: pushq %rbx ; X64-NEXT: .Lcfi2: ; X64-NEXT: .cfi_def_cfa_offset 32 -; X64-NEXT: pushq %rbx ; X64-NEXT: .Lcfi3: -; X64-NEXT: .cfi_def_cfa_offset 40 +; X64-NEXT: .cfi_offset %rbx, -32 ; X64-NEXT: .Lcfi4: -; X64-NEXT: .cfi_offset %rbx, -40 -; X64-NEXT: .Lcfi5: -; X64-NEXT: .cfi_offset %r12, -32 -; X64-NEXT: .Lcfi6: ; X64-NEXT: .cfi_offset %r14, -24 -; X64-NEXT: .Lcfi7: +; X64-NEXT: .Lcfi5: ; X64-NEXT: .cfi_offset %r15, -16 ; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq (%rdi), %r14 +; X64-NEXT: movq (%rdi), %r11 ; X64-NEXT: movq 8(%rdi), %r8 -; X64-NEXT: movq 16(%rdi), %rcx -; X64-NEXT: movq 16(%rsi), %rbx -; X64-NEXT: movq (%rsi), %r12 +; X64-NEXT: movq 16(%rdi), %rbx +; X64-NEXT: movq 16(%rsi), %r10 +; X64-NEXT: movq (%rsi), %rcx ; X64-NEXT: movq 8(%rsi), %r15 ; X64-NEXT: movq 24(%rdi), %rdi -; X64-NEXT: imulq %r12, %rdi -; X64-NEXT: movq %r12, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rax, %r10 +; X64-NEXT: imulq %rcx, %rdi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rax, %r14 ; X64-NEXT: addq %rdi, %rdx -; X64-NEXT: imulq %r15, %rcx -; X64-NEXT: addq %rdx, %rcx -; X64-NEXT: movq %rbx, %rdi +; X64-NEXT: imulq %r15, %rbx +; X64-NEXT: addq %rdx, %rbx +; X64-NEXT: movq %r10, %rdi ; X64-NEXT: imulq %r8, %rdi -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: mulq %r14 -; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq %r10, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rax, %r10 ; X64-NEXT: addq %rdi, %rdx -; X64-NEXT: movq 24(%rsi), %rbx -; X64-NEXT: imulq %r14, %rbx -; X64-NEXT: addq %rdx, %rbx -; X64-NEXT: addq %r10, %r11 -; X64-NEXT: adcq %rcx, %rbx -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %r12 +; X64-NEXT: movq 24(%rsi), %rdi +; X64-NEXT: imulq %r11, %rdi +; X64-NEXT: addq %rdx, %rdi +; X64-NEXT: addq %r14, %r10 +; X64-NEXT: adcq %rbx, %rdi +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r10 +; X64-NEXT: movq %rax, %r14 ; X64-NEXT: movq %r8, %rax -; X64-NEXT: mulq %r12 +; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %rsi, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rsi, %rbx ; X64-NEXT: adcq $0, %rcx -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rsi -; 
X64-NEXT: movq %rax, %r14 -; X64-NEXT: addq %rdi, %r14 +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: addq %rbx, %r11 ; X64-NEXT: adcq %rcx, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx +; X64-NEXT: setb %al +; X64-NEXT: movzbl %al, %ecx ; X64-NEXT: movq %r8, %rax ; X64-NEXT: mulq %r15 ; X64-NEXT: addq %rsi, %rax ; X64-NEXT: adcq %rcx, %rdx -; X64-NEXT: addq %r11, %rax -; X64-NEXT: adcq %rbx, %rdx -; X64-NEXT: movq %r10, (%r9) -; X64-NEXT: movq %r14, 8(%r9) +; X64-NEXT: addq %r10, %rax +; X64-NEXT: adcq %rdi, %rdx +; X64-NEXT: movq %r14, (%r9) +; X64-NEXT: movq %r11, 8(%r9) ; X64-NEXT: movq %rax, 16(%r9) ; X64-NEXT: movq %rdx, 24(%r9) ; X64-NEXT: popq %rbx -; X64-NEXT: popq %r12 ; X64-NEXT: popq %r14 ; X64-NEXT: popq %r15 ; X64-NEXT: retq Index: llvm/trunk/test/CodeGen/X86/mul-i512.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mul-i512.ll +++ llvm/trunk/test/CodeGen/X86/mul-i512.ll @@ -74,14 +74,13 @@ ; X32-NEXT: movl 20(%eax), %edi ; X32-NEXT: movl 24(%eax), %ebx ; X32-NEXT: movl 28(%eax), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %eax, (%esp) # 4-byte Spill ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %eax ; X32-NEXT: pushl %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: pushl %edi ; X32-NEXT: pushl %esi ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax @@ -107,6 +106,7 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -123,8 +123,7 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: pushl %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp @@ -133,10 +132,11 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload ; X32-NEXT: pushl %esi -; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 @@ -145,25 +145,24 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %edi -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; 
X32-NEXT: calll __multi3 @@ -172,7 +171,7 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload -; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -183,14 +182,14 @@ ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: pushl %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp @@ -198,8 +197,8 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -213,7 +212,7 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %edi -; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax @@ -223,11 +222,11 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 @@ -240,20 +239,20 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %edi -; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: pushl %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %esi ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 @@ -262,8 +261,8 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -274,21 +273,21 @@ ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %esi ; X32-NEXT: pushl %edi +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi ; X32-NEXT: pushl %esi -; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %eax ; X32-NEXT: 
calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 @@ -298,11 +297,11 @@ ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax -; X32-NEXT: pushl %esi -; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -313,8 +312,8 @@ ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %edi ; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -323,7 +322,7 @@ ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax -; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl (%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -349,10 +348,10 @@ ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: pushl %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -365,18 +364,18 @@ ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %esi ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %edi ; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %edi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload @@ -494,134 +493,142 @@ ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: addl %esi, %eax ; X32-NEXT: adcl %edx, %ecx -; X32-NEXT: movl $0, %edi -; X32-NEXT: adcl $0, %edi -; X32-NEXT: sbbl %edx, %edx -; X32-NEXT: andl $1, %edx +; X32-NEXT: setb %dl ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movzbl %dl, %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %edx, %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; 
X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edx, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edx, %ebx +; X32-NEXT: movl %ebx, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl %esi, (%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl %esi, %ebx +; X32-NEXT: movl %ebx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %eax, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edx, %eax -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %esi, %esi -; X32-NEXT: andl $1, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl 
{{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: addl %edi, %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edx, %eax
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl %eax, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: addl %edx, %edi
+; X32-NEXT: movl %ecx, %edx
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl %eax, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: adcl %ecx, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
@@ -629,140 +636,125 @@
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: adcl %esi, %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl %edx, %eax
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: adcl $0, %eax
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %esi, %edx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movzbl %al, %ebx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %ecx, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl %edx, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ebx, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
@@ -777,7 +769,7 @@
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
@@ -789,25 +781,24 @@
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: adcl %ecx, %edi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -828,8 +819,8 @@
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: addl %edi, %ebx
+; X32-NEXT: adcl %esi, %ecx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -838,7 +829,7 @@
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: movl (%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
@@ -853,7 +844,7 @@
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
@@ -864,36 +855,36 @@
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl 16(%ebp), %edi
-; X32-NEXT: movl %esi, 4(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl 16(%ebp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, (%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 8(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 12(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 16(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 20(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 24(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 28(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 32(%esi)
-; X32-NEXT: movl %ebx, 36(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 40(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 44(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 48(%esi)
-; X32-NEXT: movl %ecx, 52(%esi)
-; X32-NEXT: movl %edx, 56(%esi)
-; X32-NEXT: movl %eax, 60(%esi)
+; X32-NEXT: movl %edi, 4(%esi)
+; X32-NEXT: movl 16(%ebp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, (%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 8(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 12(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 16(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 20(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 24(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 28(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 32(%edi)
+; X32-NEXT: movl %ebx, 36(%edi)
+; X32-NEXT: movl (%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 40(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 44(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 48(%edi)
+; X32-NEXT: movl %ecx, 52(%edi)
+; X32-NEXT: movl %edx, 56(%edi)
+; X32-NEXT: movl %eax, 60(%edi)
; X32-NEXT: leal -12(%ebp), %esp
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
@@ -912,35 +903,36 @@
; X64-NEXT: pushq %rax
; X64-NEXT: movq %rdx, (%rsp) # 8-byte Spill
; X64-NEXT: movq 24(%rdi), %r11
-; X64-NEXT: movq 16(%rdi), %r14
+; X64-NEXT: movq 16(%rdi), %r15
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: movq 8(%rsi), %rbp
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq %r15, %rax
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rdx, %r8
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, %r9
+; X64-NEXT: movq %rax, %r8
; X64-NEXT: movq %r11, %rax
; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rsi, %r10
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: addq %r8, %rsi
+; X64-NEXT: addq %r9, %rsi
; X64-NEXT: adcq $0, %rbx
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r15, %rax
+; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rsi, %r9
; X64-NEXT: adcq %rbx, %rcx
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: setb %al
+; X64-NEXT: movzbl %al, %ebx
; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rbp
-; X64-NEXT: movq %rbp, %r8
+; X64-NEXT: movq %rbp, %r14
+; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp
@@ -952,46 +944,44 @@
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r13
; X64-NEXT: movq %rax, %r10
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq %r15, %rax
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r12
-; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r15
; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r10, %r15
-; X64-NEXT: adcq %r13, %r12
+; X64-NEXT: adcq %r13, %rdx
; X64-NEXT: addq %rbp, %r15
-; X64-NEXT: adcq %rsi, %r12
+; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movq %rdx, %r12
; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq (%rdi), %r14
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %rbx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq 8(%rdi), %rcx
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq 8(%rdi), %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rbx
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rsi
; X64-NEXT: addq %r11, %rsi
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: mulq %r14
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: addq %rsi, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rbp, %rbx
-; X64-NEXT: sbbq %rdi, %rdi
-; X64-NEXT: andl $1, %edi
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: setb %r11b
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: mulq %r14
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rbx, %rbp
-; X64-NEXT: adcq %rdi, %rsi
-; X64-NEXT: movq %r14, %rcx
+; X64-NEXT: movzbl %r11b, %eax
+; X64-NEXT: adcq %rax, %rsi
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: mulq %rdx
@@ -1001,10 +991,11 @@
; X64-NEXT: adcq %r14, %r13
; X64-NEXT: addq %rbp, %r10
; X64-NEXT: adcq %rsi, %r13
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
+; X64-NEXT: addq %r8, %r10
; X64-NEXT: adcq %r9, %r13
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r12
+; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq 16(%rsi), %r8
; X64-NEXT: movq %rcx, %rax
@@ -1012,7 +1003,7 @@
; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rdi
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, %r12
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r8
@@ -1027,14 +1018,14 @@
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rbp, %rsi
-; X64-NEXT: sbbq %rbp, %rbp
-; X64-NEXT: andl $1, %ebp
+; X64-NEXT: setb %bpl
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rsi, %r9
-; X64-NEXT: adcq %rbp, %rbx
+; X64-NEXT: movzbl %bpl, %eax
+; X64-NEXT: adcq %rax, %rbx
; X64-NEXT: movq %r8, %rax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: mulq %rcx
@@ -1044,16 +1035,14 @@
; X64-NEXT: adcq %rdx, %r14
; X64-NEXT: addq %r9, %r11
; X64-NEXT: adcq %rbx, %r14
-; X64-NEXT: addq %r10, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
+; X64-NEXT: addq %r10, %r12
+; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %r13, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
; X64-NEXT: adcq $0, %r11
; X64-NEXT: adcq $0, %r14
; X64-NEXT: addq %r15, %r11
-; X64-NEXT: adcq %r12, %r14
-; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: movq %rcx, %r13
-; X64-NEXT: sbbq %r9, %r9
-; X64-NEXT: andl $1, %r9d
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: setb %r9b
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r8
@@ -1072,12 +1061,12 @@
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
+; X64-NEXT: setb %sil
; X64-NEXT: movq %r10, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movzbl %sil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: addq %rbp, %rsi
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
@@ -1088,9 +1077,10 @@
; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %r14, %rbx
; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r13, %rsi
+; X64-NEXT: movzbl %r9b, %eax
+; X64-NEXT: adcq %rax, %rsi
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r9, %rcx
+; X64-NEXT: adcq $0, %rcx
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq 32(%rcx), %rsi
@@ -1105,42 +1095,44 @@
; X64-NEXT: movq 48(%rcx), %rax
; X64-NEXT: movq %rcx, %rbx
; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; X64-NEXT: imulq %r11, %rdi
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rax, %r12
+; X64-NEXT: imulq %rcx, %rdi
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: mulq %rbp
+; X64-NEXT: movq %rax, %r14
; X64-NEXT: addq %rdi, %rdx
; X64-NEXT: movq 56(%rbx), %rbx
-; X64-NEXT: imulq %rcx, %rbx
+; X64-NEXT: imulq %rbp, %rbx
; X64-NEXT: addq %rdx, %rbx
-; X64-NEXT: addq %r10, %r12
+; X64-NEXT: addq %r10, %r14
; X64-NEXT: adcq %r8, %rbx
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %rbp, %r10
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r11, %rax
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rcx, %r8
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rdi, %rbp
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %r10, %rax
; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %rdi
-; X64-NEXT: movq %rax, %r15
-; X64-NEXT: addq %rbp, %r15
+; X64-NEXT: movq %rax, %r13
+; X64-NEXT: addq %rbp, %r13
; X64-NEXT: adcq %rsi, %rdi
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %r11, %rax
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq %rax, %r14
-; X64-NEXT: addq %rdi, %r14
-; X64-NEXT: adcq %rsi, %r11
-; X64-NEXT: addq %r12, %r14
+; X64-NEXT: movq %rax, %r9
+; X64-NEXT: addq %rdi, %r9
+; X64-NEXT: movzbl %cl, %eax
+; X64-NEXT: adcq %rax, %r11
+; X64-NEXT: addq %r14, %r9
; X64-NEXT: adcq %rbx, %r11
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
; X64-NEXT: movq 56(%rdx), %rcx
@@ -1152,49 +1144,50 @@
; X64-NEXT: mulq %rbx
; X64-NEXT: movq %rax, %rsi
; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
-; X64-NEXT: imulq %r8, %rbx
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
+; X64-NEXT: imulq %r15, %rbx
; X64-NEXT: addq %rdx, %rbx
; X64-NEXT: movq 32(%rbp), %rdi
-; X64-NEXT: movq 40(%rbp), %r12
+; X64-NEXT: movq 40(%rbp), %r8
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: imulq %r12, %rcx
+; X64-NEXT: imulq %r8, %rcx
; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rax, %r9
+; X64-NEXT: movq %rax, %r14
; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload
-; X64-NEXT: imulq %rdi, %r13
-; X64-NEXT: addq %rdx, %r13
-; X64-NEXT: addq %rsi, %r9
-; X64-NEXT: adcq %rbx, %r13
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: imulq %rdi, %rax
+; X64-NEXT: addq %rdx, %rax
+; X64-NEXT: addq %rsi, %r14
+; X64-NEXT: adcq %rbx, %rax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r10
-; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rdx, %r12
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq %r12, %rax
+; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r10
-; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %rcx, %rbp
-; X64-NEXT: adcq $0, %rbx
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %r12, %rbx
+; X64-NEXT: adcq $0, %rcx
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: mulq %r15
+; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %rbp, %rdi
-; X64-NEXT: adcq %rbx, %rcx
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
-; X64-NEXT: movq %r12, %rax
-; X64-NEXT: mulq %r8
-; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rbx, %rdx
-; X64-NEXT: addq %r9, %rax
-; X64-NEXT: adcq %r13, %rdx
+; X64-NEXT: addq %rbx, %rdi
+; X64-NEXT: adcq %rcx, %rbp
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: mulq %r15
+; X64-NEXT: addq %rbp, %rax
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: adcq %rcx, %rdx
+; X64-NEXT: addq %r14, %rax
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq %r15, %rdi
-; X64-NEXT: adcq %r14, %rax
+; X64-NEXT: adcq %r13, %rdi
+; X64-NEXT: adcq %r9, %rax
; X64-NEXT: adcq %r11, %rdx
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
Index: llvm/trunk/test/CodeGen/X86/overflow.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/overflow.ll
+++ llvm/trunk/test/CodeGen/X86/overflow.ll
@@ -27,16 +27,14 @@
; X32-NEXT: addl $32, %esp
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %edi
-; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: sbbl %edx, %edx
-; X32-NEXT: andl $1, %edx
+; X32-NEXT: setb %cl
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: movl %edi, (%esi)
; X32-NEXT: movl %eax, 4(%esi)
; X32-NEXT: movl %ecx, 8(%esi)
-; X32-NEXT: movl %edx, 12(%esi)
+; X32-NEXT: movl $0, 12(%esi)
; X32-NEXT: movl %esi, %eax
; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
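
Note (illustrative, not part of the patch): every test delta above is the same rewrite, materializing a carry with setb/movzbl instead of the old sbb-then-and idiom. A minimal IR sketch in the style of the addcarry.ll tests, assuming the hypothetical function name @carry_only and compilation with llc for an x86-64 triple, that exercises the new (addcarry 0, 0, X) path:

; Hypothetical reduced test, modeled on the existing addcarry.ll cases.
; Only the carry-out of %a + %b is live, so after legalization the high
; 64 bits of the widened add become (addcarry 0, 0, C), which the new
; combine reduces to a zero-extended carry bit instead of a sbb/and pair.
define i64 @carry_only(i64 %a, i64 %b) {
entry:
  %0 = zext i64 %a to i128
  %1 = zext i64 %b to i128
  %2 = add i128 %0, %1
  %3 = lshr i128 %2, 64
  %4 = trunc i128 %3 to i64
  ret i64 %4
}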