Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6968,12 +6968,11 @@
       SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, LN0->getChain(),
                                        LN0->getBasePtr(), N0.getValueType(),
                                        LN0->getMemOperand());
-      CombineTo(N, ExtLoad);
       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
                                   N0.getValueType(), ExtLoad);
-      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
       ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, ISD::SIGN_EXTEND);
-      return SDValue(N, 0); // Return N so it doesn't get rechecked!
+      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
+      return CombineTo(N, ExtLoad); // Return N so it doesn't get rechecked!
     }
   }
@@ -7285,12 +7284,9 @@
       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
                                   N0.getValueType(), ExtLoad);
+      ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), ISD::ZERO_EXTEND);
       CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
-
-      ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
-                      ISD::ZERO_EXTEND);
-      CombineTo(N, ExtLoad);
-      return SDValue(N, 0); // Return N so it doesn't get rechecked!
+      return CombineTo(N, ExtLoad); // Return N so it doesn't get rechecked!
     }
   }
@@ -7340,11 +7336,9 @@
         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0.getOperand(0)),
                                     N0.getOperand(0).getValueType(), ExtLoad);
-        CombineTo(N, And);
         CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1));
-        ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL,
-                        ISD::ZERO_EXTEND);
-        return SDValue(N, 0); // Return N so it doesn't get rechecked!
+        ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, ISD::ZERO_EXTEND);
+        return CombineTo(N, And); // Return N so it doesn't get rechecked!
       }
     }
   }
Index: test/CodeGen/X86/pr32284.ll
===================================================================
--- test/CodeGen/X86/pr32284.ll
+++ test/CodeGen/X86/pr32284.ll
@@ -1,11 +1,96 @@
-; RUN: llc -O0 -mtriple=i686-unknown -mcpu=skx -o - %s
-; RUN: llc -O0 -mtriple=x86_64-unknown -mcpu=skx -o - %s
-; RUN: llc -mtriple=i686-unknown -mcpu=skx -o - %s
-; RUN: llc -mtriple=x86_64-unknown -mcpu=skx -o - %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=i686-unknown -mcpu=skx -o - %s | FileCheck %s -check-prefix=6860
+; RUN: llc -O0 -mtriple=x86_64-unknown -mcpu=skx -o - %s | FileCheck %s -check-prefix=X640
+; RUN: llc -mtriple=i686-unknown -mcpu=skx -o - %s | FileCheck %s -check-prefix=686
+; RUN: llc -mtriple=x86_64-unknown -mcpu=skx -o - %s | FileCheck %s -check-prefix=X64
 
 @c = external constant i8, align 1
 
 define void @foo() {
+; 6860-LABEL: foo:
+; 6860: # BB#0: # %entry
+; 6860-NEXT: subl $12, %esp
+; 6860-NEXT: .Lcfi0:
+; 6860-NEXT: .cfi_def_cfa_offset 16
+; 6860-NEXT: movzbl c, %eax
+; 6860-NEXT: testl %eax, %eax
+; 6860-NEXT: setne %cl
+; 6860-NEXT: movl %eax, %edx
+; 6860-NEXT: movb %dl, %ch
+; 6860-NEXT: testb %ch, %ch
+; 6860-NEXT: setne {{[0-9]+}}(%esp)
+; 6860-NEXT: movzbl %cl, %edx
+; 6860-NEXT: subl %eax, %edx
+; 6860-NEXT: setle %cl
+; 6860-NEXT: # implicit-def: %EAX
+; 6860-NEXT: movb %cl, %al
+; 6860-NEXT: andl $1, %eax
+; 6860-NEXT: kmovw %eax, %k0
+; 6860-NEXT: kmovd %k0, %eax
+; 6860-NEXT: movb %al, %cl
+; 6860-NEXT: andb $1, %cl
+; 6860-NEXT: movzbl %cl, %eax
+; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; 6860-NEXT: movl %edx, (%esp) # 4-byte Spill
+; 6860-NEXT: addl $12, %esp
+; 6860-NEXT: retl
+;
+; X640-LABEL: foo:
+; X640: # BB#0: # %entry
+; X640-NEXT: movzbl {{.*}}(%rip), %eax
+; X640-NEXT: movl %eax, %ecx
+; X640-NEXT: movb %cl, %dl
+; X640-NEXT: movl %ecx, %eax
+; X640-NEXT: testq %rcx, %rcx
+; X640-NEXT: setne %sil
+; X640-NEXT: testb %dl, %dl
+; X640-NEXT: setne -{{[0-9]+}}(%rsp)
+; X640-NEXT: movzbl %sil, %edi
+; X640-NEXT: subl %eax, %edi
+; X640-NEXT: setle %dl
+; X640-NEXT: # implicit-def: %EAX
+; X640-NEXT: movb %dl, %al
+; X640-NEXT: andl $1, %eax
+; X640-NEXT: kmovw %eax, %k0
+; X640-NEXT: kmovd %k0, %eax
+; X640-NEXT: movb %al, %dl
+; X640-NEXT: andb $1, %dl
+; X640-NEXT: movzbl %dl, %eax
+; X640-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; X640-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; X640-NEXT: retq
+;
+; 686-LABEL: foo:
+; 686: # BB#0: # %entry
+; 686-NEXT: subl $8, %esp
+; 686-NEXT: .Lcfi0:
+; 686-NEXT: .cfi_def_cfa_offset 12
+; 686-NEXT: movzbl c, %eax
+; 686-NEXT: xorl %ecx, %ecx
+; 686-NEXT: testl %eax, %eax
+; 686-NEXT: setne %cl
+; 686-NEXT: testb %al, %al
+; 686-NEXT: setne {{[0-9]+}}(%esp)
+; 686-NEXT: xorl %edx, %edx
+; 686-NEXT: cmpl %eax, %ecx
+; 686-NEXT: setle %dl
+; 686-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; 686-NEXT: addl $8, %esp
+; 686-NEXT: retl
+;
+; X64-LABEL: foo:
+; X64: # BB#0: # %entry
+; X64-NEXT: movzbl {{.*}}(%rip), %eax
+; X64-NEXT: testb %al, %al
+; X64-NEXT: setne -{{[0-9]+}}(%rsp)
+; X64-NEXT: xorl %ecx, %ecx
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %cl
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl %eax, %ecx
+; X64-NEXT: setle %dl
+; X64-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; X64-NEXT: retq
 entry:
   %a = alloca i8, align 1
   %b = alloca i32, align 4
@@ -30,3 +115,367 @@
   store i32 %conv8, i32* %b, align 4
   ret void
 }
+
+@var_5 = external global i32, align 4
+@var_57 = external global i64, align 8
+@_ZN8struct_210member_2_0E = external global i64, align 8
+
+define void @f1() {
+; 6860-LABEL: f1:
+; 6860: # BB#0: # %entry
+; 6860-NEXT: pushl %ebp
+; 6860-NEXT: .Lcfi1:
+; 6860-NEXT: .cfi_def_cfa_offset 8
+; 6860-NEXT: pushl %ebx
+; 6860-NEXT: .Lcfi2:
+; 6860-NEXT: .cfi_def_cfa_offset 12
+; 6860-NEXT: pushl %edi
+; 6860-NEXT: .Lcfi3:
+; 6860-NEXT: .cfi_def_cfa_offset 16
+; 6860-NEXT: pushl %esi
+; 6860-NEXT: .Lcfi4:
+; 6860-NEXT: .cfi_def_cfa_offset 20
+; 6860-NEXT: subl $36, %esp
+; 6860-NEXT: .Lcfi5:
+; 6860-NEXT: .cfi_def_cfa_offset 56
+; 6860-NEXT: .Lcfi6:
+; 6860-NEXT: .cfi_offset %esi, -20
+; 6860-NEXT: .Lcfi7:
+; 6860-NEXT: .cfi_offset %edi, -16
+; 6860-NEXT: .Lcfi8:
+; 6860-NEXT: .cfi_offset %ebx, -12
+; 6860-NEXT: .Lcfi9:
+; 6860-NEXT: .cfi_offset %ebp, -8
+; 6860-NEXT: movl var_5, %eax
+; 6860-NEXT: movl %eax, %ecx
+; 6860-NEXT: sarl $31, %ecx
+; 6860-NEXT: movl %eax, %edx
+; 6860-NEXT: andl %ecx, %edx
+; 6860-NEXT: subl $-1, %edx
+; 6860-NEXT: sete %bl
+; 6860-NEXT: movl %eax, %esi
+; 6860-NEXT: xorl $208307499, %esi # imm = 0xC6A852B
+; 6860-NEXT: movl %ecx, %edi
+; 6860-NEXT: xorl $-2, %edi
+; 6860-NEXT: orl %edi, %esi
+; 6860-NEXT: setne {{[0-9]+}}(%esp)
+; 6860-NEXT: movl %eax, %edi
+; 6860-NEXT: subl $-1, %edi
+; 6860-NEXT: sete %bh
+; 6860-NEXT: movzbl %bh, %ebp
+; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: xorl %eax, %eax
+; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; 6860-NEXT: addl $7093, %eax # imm = 0x1BB5
+; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; 6860-NEXT: adcxl %eax, %ecx
+; 6860-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; 6860-NEXT: subl %ebp, %eax
+; 6860-NEXT: sbbl $0, %ecx
+; 6860-NEXT: setl %bh
+; 6860-NEXT: movzbl %bh, %ebp
+; 6860-NEXT: movl %ebp, var_57
+; 6860-NEXT: movl $0, var_57+4
+; 6860-NEXT: movzbl %bl, %ebp
+; 6860-NEXT: movl %ebp, _ZN8struct_210member_2_0E
+; 6860-NEXT: movl $0, _ZN8struct_210member_2_0E+4
+; 6860-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: movl %esi, (%esp) # 4-byte Spill
+; 6860-NEXT: addl $36, %esp
+; 6860-NEXT: popl %esi
+; 6860-NEXT: popl %edi
+; 6860-NEXT: popl %ebx
+; 6860-NEXT: popl %ebp
+; 6860-NEXT: retl
+;
+; X640-LABEL: f1:
+; X640: # BB#0: # %entry
+; X640-NEXT: movslq {{.*}}(%rip), %rax
+; X640-NEXT: movq %rax, %rcx
+; X640-NEXT: subq $-1, %rcx
+; X640-NEXT: setne %dl
+; X640-NEXT: # implicit-def: %ESI
+; X640-NEXT: movb %dl, %sil
+; X640-NEXT: sete %dl
+; X640-NEXT: andl $1, %esi
+; X640-NEXT: kmovw %esi, %k0
+; X640-NEXT: # implicit-def: %ESI
+; X640-NEXT: movb %dl, %sil
+; X640-NEXT: andl $1, %esi
+; X640-NEXT: kmovw %esi, %k1
+; X640-NEXT: movabsq $-8381627093, %rdi # imm = 0xFFFFFFFE0C6A852B
+; X640-NEXT: movq %rax, %r8
+; X640-NEXT: subq %rdi, %r8
+; X640-NEXT: setne -{{[0-9]+}}(%rsp)
+; X640-NEXT: movl %eax, %esi
+; X640-NEXT: subl $-1, %esi
+; X640-NEXT: sete %dl
+; X640-NEXT: movzbl %dl, %r9d
+; X640-NEXT: movl %r9d, %edi
+; X640-NEXT: addq $7093, %rax # imm = 0x1BB5
+; X640-NEXT: subq %rax, %rdi
+; X640-NEXT: setg %dl
+; X640-NEXT: movzbl %dl, %r9d
+; X640-NEXT: movl %r9d, %eax
+; X640-NEXT: movq %rax, {{.*}}(%rip)
+; X640-NEXT: kmovd %k1, %r9d
+; X640-NEXT: movb %r9b, %dl
+; X640-NEXT: andb $1, %dl
+; X640-NEXT: movzbl %dl, %r9d
+; X640-NEXT: movl %r9d, %eax
+; X640-NEXT: movq %rax, _ZN8struct_210member_2_0E
+; X640-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X640-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X640-NEXT: movl %esi, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; X640-NEXT: kmovw %k0, -{{[0-9]+}}(%rsp) # 2-byte Spill
+; X640-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X640-NEXT: retq
+;
+; 686-LABEL: f1:
+; 686: # BB#0: # %entry
+; 686-NEXT: pushl %edi
+; 686-NEXT: .Lcfi1:
+; 686-NEXT: .cfi_def_cfa_offset 8
+; 686-NEXT: pushl %esi
+; 686-NEXT: .Lcfi2:
+; 686-NEXT: .cfi_def_cfa_offset 12
+; 686-NEXT: subl $1, %esp
+; 686-NEXT: .Lcfi3:
+; 686-NEXT: .cfi_def_cfa_offset 13
+; 686-NEXT: .Lcfi4:
+; 686-NEXT: .cfi_offset %esi, -12
+; 686-NEXT: .Lcfi5:
+; 686-NEXT: .cfi_offset %edi, -8
+; 686-NEXT: movl var_5, %edx
+; 686-NEXT: movl %edx, %esi
+; 686-NEXT: sarl $31, %esi
+; 686-NEXT: movl %edx, %ecx
+; 686-NEXT: andl %esi, %ecx
+; 686-NEXT: xorl %eax, %eax
+; 686-NEXT: cmpl $-1, %ecx
+; 686-NEXT: sete %al
+; 686-NEXT: movl %edx, %ecx
+; 686-NEXT: xorl $208307499, %ecx # imm = 0xC6A852B
+; 686-NEXT: movl %esi, %edi
+; 686-NEXT: xorl $-2, %edi
+; 686-NEXT: orl %ecx, %edi
+; 686-NEXT: setne (%esp)
+; 686-NEXT: xorl %ecx, %ecx
+; 686-NEXT: cmpl $-1, %edx
+; 686-NEXT: sete %cl
+; 686-NEXT: xorl %edi, %edi
+; 686-NEXT: addl $7093, %edx # imm = 0x1BB5
+; 686-NEXT: adcxl %edi, %esi
+; 686-NEXT: cmpl %ecx, %edx
+; 686-NEXT: sbbl $0, %esi
+; 686-NEXT: setl %cl
+; 686-NEXT: movzbl %cl, %ecx
+; 686-NEXT: movl %ecx, var_57
+; 686-NEXT: movl $0, var_57+4
+; 686-NEXT: movl %eax, _ZN8struct_210member_2_0E
+; 686-NEXT: movl $0, _ZN8struct_210member_2_0E+4
+; 686-NEXT: addl $1, %esp
+; 686-NEXT: popl %esi
+; 686-NEXT: popl %edi
+; 686-NEXT: retl
+;
+; X64-LABEL: f1:
+; X64: # BB#0: # %entry
+; X64-NEXT: movslq {{.*}}(%rip), %rax
+; X64-NEXT: xorl %ecx, %ecx
+; X64-NEXT: cmpq $-1, %rax
+; X64-NEXT: sete %cl
+; X64-NEXT: movabsq $-8381627093, %rdx # imm = 0xFFFFFFFE0C6A852B
+; X64-NEXT: cmpq %rdx, %rax
+; X64-NEXT: setne -{{[0-9]+}}(%rsp)
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl $-1, %eax
+; X64-NEXT: sete %dl
+; X64-NEXT: addq $7093, %rax # imm = 0x1BB5
+; X64-NEXT: xorl %esi, %esi
+; X64-NEXT: cmpq %rax, %rdx
+; X64-NEXT: setg %sil
+; X64-NEXT: movq %rsi, {{.*}}(%rip)
+; X64-NEXT: movq %rcx, {{.*}}(%rip)
+; X64-NEXT: retq
+entry:
+  %a = alloca i8, align 1
+  %0 = load i32, i32* @var_5, align 4
+  %conv = sext i32 %0 to i64
+  %add = add nsw i64 %conv, 8381627093
+  %tobool = icmp ne i64 %add, 0
+  %frombool = zext i1 %tobool to i8
+  store i8 %frombool, i8* %a, align 1
+  %1 = load i32, i32* @var_5, align 4
+  %neg = xor i32 %1, -1
+  %tobool1 = icmp ne i32 %neg, 0
+  %lnot = xor i1 %tobool1, true
+  %conv2 = zext i1 %lnot to i64
+  %2 = load i32, i32* @var_5, align 4
+  %conv3 = sext i32 %2 to i64
+  %add4 = add nsw i64 %conv3, 7093
+  %cmp = icmp sgt i64 %conv2, %add4
+  %conv5 = zext i1 %cmp to i64
+  store i64 %conv5, i64* @var_57, align 8
+  %3 = load i32, i32* @var_5, align 4
+  %neg6 = xor i32 %3, -1
+  %tobool7 = icmp ne i32 %neg6, 0
+  %lnot8 = xor i1 %tobool7, true
+  %conv9 = zext i1 %lnot8 to i64
+  store i64 %conv9, i64* @_ZN8struct_210member_2_0E, align 8
+  ret void
+}
+
+
+@var_7 = external global i8, align 1
+
+define void @f2() {
+; 6860-LABEL: f2:
+; 6860: # BB#0: # %entry
+; 6860-NEXT: pushl %ebx
+; 6860-NEXT: .Lcfi10:
+; 6860-NEXT: .cfi_def_cfa_offset 8
+; 6860-NEXT: pushl %edi
+; 6860-NEXT: .Lcfi11:
+; 6860-NEXT: .cfi_def_cfa_offset 12
+; 6860-NEXT: pushl %esi
+; 6860-NEXT: .Lcfi12:
+; 6860-NEXT: .cfi_def_cfa_offset 16
+; 6860-NEXT: subl $8, %esp
+; 6860-NEXT: .Lcfi13:
+; 6860-NEXT: .cfi_def_cfa_offset 24
+; 6860-NEXT: .Lcfi14:
+; 6860-NEXT: .cfi_offset %esi, -16
+; 6860-NEXT: .Lcfi15:
+; 6860-NEXT: .cfi_offset %edi, -12
+; 6860-NEXT: .Lcfi16:
+; 6860-NEXT: .cfi_offset %ebx, -8
+; 6860-NEXT: # implicit-def: %EAX
+; 6860-NEXT: movzbl var_7, %ecx
+; 6860-NEXT: testl %ecx, %ecx
+; 6860-NEXT: sete %dl
+; 6860-NEXT: movzbl %dl, %esi
+; 6860-NEXT: movl %ecx, %edi
+; 6860-NEXT: xorl %esi, %edi
+; 6860-NEXT: movw %di, %bx
+; 6860-NEXT: movw %bx, {{[0-9]+}}(%esp)
+; 6860-NEXT: movl %ecx, %edx
+; 6860-NEXT: # kill: %DL<def> %DL<kill> %EDX<kill>
+; 6860-NEXT: testb %dl, %dl
+; 6860-NEXT: sete %dl
+; 6860-NEXT: movzbl %dl, %esi
+; 6860-NEXT: subl %ecx, %esi
+; 6860-NEXT: sete %dl
+; 6860-NEXT: # implicit-def: %ECX
+; 6860-NEXT: movb %dl, %cl
+; 6860-NEXT: andl $1, %ecx
+; 6860-NEXT: kmovw %ecx, %k0
+; 6860-NEXT: kmovd %k0, %ecx
+; 6860-NEXT: movb %cl, %dl
+; 6860-NEXT: andb $1, %dl
+; 6860-NEXT: movzbl %dl, %ecx
+; 6860-NEXT: movw %cx, %bx
+; 6860-NEXT: movw %bx, (%eax)
+; 6860-NEXT: movl %esi, (%esp) # 4-byte Spill
+; 6860-NEXT: addl $8, %esp
+; 6860-NEXT: popl %esi
+; 6860-NEXT: popl %edi
+; 6860-NEXT: popl %ebx
+; 6860-NEXT: retl
+;
+; X640-LABEL: f2:
+; X640: # BB#0: # %entry
+; X640-NEXT: # implicit-def: %RAX
+; X640-NEXT: movzbl {{.*}}(%rip), %ecx
+; X640-NEXT: testl %ecx, %ecx
+; X640-NEXT: sete %dl
+; X640-NEXT: movzbl %dl, %esi
+; X640-NEXT: movl %ecx, %edi
+; X640-NEXT: xorl %esi, %edi
+; X640-NEXT: movw %di, %r8w
+; X640-NEXT: movw %r8w, -{{[0-9]+}}(%rsp)
+; X640-NEXT: movb %cl, %dl
+; X640-NEXT: testb %dl, %dl
+; X640-NEXT: sete %dl
+; X640-NEXT: movzbl %dl, %esi
+; X640-NEXT: subl %ecx, %esi
+; X640-NEXT: sete %dl
+; X640-NEXT: # implicit-def: %ECX
+; X640-NEXT: movb %dl, %cl
+; X640-NEXT: andl $1, %ecx
+; X640-NEXT: kmovw %ecx, %k0
+; X640-NEXT: kmovd %k0, %ecx
+; X640-NEXT: movb %cl, %dl
+; X640-NEXT: andb $1, %dl
+; X640-NEXT: movzbl %dl, %ecx
+; X640-NEXT: movw %cx, %r8w
+; X640-NEXT: movw %r8w, (%rax)
+; X640-NEXT: movl %esi, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; X640-NEXT: retq
+;
+; 686-LABEL: f2:
+; 686: # BB#0: # %entry
+; 686-NEXT: subl $2, %esp
+; 686-NEXT: .Lcfi6:
+; 686-NEXT: .cfi_def_cfa_offset 6
+; 686-NEXT: movzbl var_7, %eax
+; 686-NEXT: xorl %ecx, %ecx
+; 686-NEXT: testl %eax, %eax
+; 686-NEXT: sete %cl
+; 686-NEXT: xorl %eax, %ecx
+; 686-NEXT: movw %cx, (%esp)
+; 686-NEXT: xorl %ecx, %ecx
+; 686-NEXT: testb %al, %al
+; 686-NEXT: sete %cl
+; 686-NEXT: xorl %edx, %edx
+; 686-NEXT: cmpl %eax, %ecx
+; 686-NEXT: sete %dl
+; 686-NEXT: movw %dx, (%eax)
+; 686-NEXT: addl $2, %esp
+; 686-NEXT: retl
+;
+; X64-LABEL: f2:
+; X64: # BB#0: # %entry
+; X64-NEXT: movzbl {{.*}}(%rip), %eax
+; X64-NEXT: xorl %ecx, %ecx
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %cl
+; X64-NEXT: xorl %eax, %ecx
+; X64-NEXT: movw %cx, -{{[0-9]+}}(%rsp)
+; X64-NEXT: xorl %ecx, %ecx
+; X64-NEXT: testb %al, %al
+; X64-NEXT: sete %cl
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl %eax, %ecx
+; X64-NEXT: sete %dl
+; X64-NEXT: movw %dx, (%rax)
+; X64-NEXT: retq
+entry:
+  %a = alloca i16, align 2
+  %0 = load i8, i8* @var_7, align 1
+  %conv = zext i8 %0 to i32
+  %1 = load i8, i8* @var_7, align 1
+  %tobool = icmp ne i8 %1, 0
+  %lnot = xor i1 %tobool, true
+  %conv1 = zext i1 %lnot to i32
+  %xor = xor i32 %conv, %conv1
+  %conv2 = trunc i32 %xor to i16
+  store i16 %conv2, i16* %a, align 2
+  %2 = load i8, i8* @var_7, align 1
+  %conv3 = zext i8 %2 to i16
+  %tobool4 = icmp ne i16 %conv3, 0
+  %lnot5 = xor i1 %tobool4, true
+  %conv6 = zext i1 %lnot5 to i32
+  %3 = load i8, i8* @var_7, align 1
+  %conv7 = zext i8 %3 to i32
+  %cmp = icmp eq i32 %conv6, %conv7
+  %conv8 = zext i1 %cmp to i32
+  %conv9 = trunc i32 %conv8 to i16
+  store i16 %conv9, i16* undef, align 2
+  ret void
+}