Index: test/CodeGen/MIR/Generic/demorgan-extra.ll =================================================================== --- /dev/null +++ test/CodeGen/MIR/Generic/demorgan-extra.ll @@ -0,0 +1,201 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc -mtriple x86_64-- -stop-after=machine-combiner -o - %s | FileCheck %s + +; There is a identical twin test in test/Transforms/InstCombine/demorgan-extra.ll +; Please keep them in sync! :) + +declare void @use8(i8) +declare i8 @gen8() + +; ============================================================================ ; + +; ~(~A & B) --> (A | ~B) + +define i8 @demorgan_nand(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nand + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_commutative(i8 %A) { + ; CHECK-LABEL: name: demorgan_nand_commutative + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen8, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $al + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; 
CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY1]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[COPY2]], killed [[NOT8r]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %B = call i8 @gen8() + %notx = xor i8 %A, -1 + %c = and i8 %B, %notx ; swapped + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_extraxor(i8 %A) { + ; CHECK-LABEL: name: demorgan_nand_extraxor + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen8, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $al + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY $al + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen8, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $al + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[XOR8rr:%[0-9]+]]:gr8 = XOR8rr [[COPY2]], [[COPY3]], implicit-def dead $eflags + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY1]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[NOT8r]], killed [[XOR8rr]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %V = call i8 @gen8() + %Z = call i8 @gen8() + %B = xor i8 %V, %Z ; not with 
-1 + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_extraxor_commutative(i8 %A) { + ; CHECK-LABEL: name: demorgan_nand_extraxor_commutative + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen8, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $al + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY $al + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen8, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $al + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[XOR8rr:%[0-9]+]]:gr8 = XOR8rr [[COPY2]], [[COPY3]], implicit-def dead $eflags + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY1]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[XOR8rr]], killed [[NOT8r]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %V = call i8 @gen8() + %Z = call i8 @gen8() + %B = xor i8 %V, %Z ; not with -1 + %notx = xor i8 %A, -1 + %c = and i8 %B, %notx ; swapped + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_badxor0(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nand_badxor0 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY 
$esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[XOR8ri:%[0-9]+]]:gr8 = XOR8ri [[COPY3]], 1, implicit-def dead $eflags + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[XOR8ri]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: $al = COPY [[NOT8r]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, 1 ; not -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_badxor1(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nand_badxor1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[XOR8ri:%[0-9]+]]:gr8 = XOR8ri [[AND8rr]], 1, implicit-def dead $eflags + ; CHECK: $al = COPY [[XOR8ri]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, 1 ; not -1 + ret i8 %notc +} + +define i8 @demorgan_nand_oneuse0(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nand_oneuse0 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit 
$rsp, implicit $ssp + ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[NOT8r]] + ; CHECK: $edi = COPY [[MOVZX32rr8_]] + ; CHECK: CALL64pcrel32 @use8, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $rsp, implicit-def $ssp + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + call void @use8(i8 %notx) + ret i8 %notc +} + +define i8 @demorgan_nand_oneuse1(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nand_oneuse1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[AND8rr]] + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: $edi = COPY [[MOVZX32rr8_]] + ; CHECK: CALL64pcrel32 @use8, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $rsp, implicit-def $ssp + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + call void @use8(i8 %c) + ret i8 %notc +} + +; ============================================================================ ; Index: test/CodeGen/MIR/Generic/demorgan.ll =================================================================== --- 
/dev/null +++ test/CodeGen/MIR/Generic/demorgan.ll @@ -0,0 +1,858 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc -mtriple x86_64-- -stop-after=machine-combiner -o - %s | FileCheck %s + +; There is a identical twin test in test/Transforms/InstCombine/demorgan.ll +; Please keep them in sync! :) + +; (~A | ~B) == ~(A & B) + +define i43 @demorgan_or_apint1(i43 %A, i43 %B) { + ; CHECK-LABEL: name: demorgan_or_apint1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 8796093022207 + ; CHECK: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY1]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[XOR64rr1:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[XOR64rr]], killed [[XOR64rr1]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[OR64rr]] + ; CHECK: RET 0, $rax + %NotA = xor i43 %A, -1 + %NotB = xor i43 %B, -1 + %C = or i43 %NotA, %NotB + ret i43 %C +} + +; (~A | ~B) == ~(A & B) + +define i129 @demorgan_or_apint2(i129 %A, i129 %B) { + ; CHECK-LABEL: name: demorgan_or_apint2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx, $r8, $r9 + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $r9 + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $r8 + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY4:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY5:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[COPY6:%[0-9]+]]:gr32 = COPY [[COPY3]].sub_32bit + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY6]] + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY4]] + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY5]] + ; CHECK: [[COPY7:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY7]] + ; CHECK: [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; 
CHECK: [[NOT64r3:%[0-9]+]]:gr64 = NOT64r [[COPY2]] + ; CHECK: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[NOT64r1]], killed [[NOT64r3]], implicit-def dead $eflags + ; CHECK: [[OR64rr1:%[0-9]+]]:gr64 = OR64rr [[NOT64r]], killed [[NOT64r2]], implicit-def dead $eflags + ; CHECK: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr64 = INSERT_SUBREG [[DEF]], killed [[OR32rr]], %subreg.sub_32bit + ; CHECK: [[COPY8:%[0-9]+]]:gr32 = COPY [[INSERT_SUBREG]].sub_32bit + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[COPY8]], 1, implicit-def dead $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[AND32ri8_]], %subreg.sub_32bit + ; CHECK: $rax = COPY [[OR64rr]] + ; CHECK: $rdx = COPY [[OR64rr1]] + ; CHECK: $rcx = COPY [[SUBREG_TO_REG]] + ; CHECK: RET 0, $rax, $rdx, $rcx + %NotA = xor i129 %A, -1 + %NotB = xor i129 %B, -1 + %C = or i129 %NotA, %NotB + ret i129 %C +} + +; (~A & ~B) == ~(A | B) + +define i477 @demorgan_and_apint1(i477 %A, i477 %B) { + ; CHECK-LABEL: name: demorgan_and_apint1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx, $r8, $r9 + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $r9 + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $r8 + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY4:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY5:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[COPY6:%[0-9]+]]:gr64 = COPY [[COPY5]] + ; CHECK: [[COPY7:%[0-9]+]]:gr64 = COPY [[COPY5]] + ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.7, 1, $noreg, 56, $noreg :: (load 4 from %fixed-stack.7 + 56, align 8) + ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 0, $noreg :: (load 8 from %fixed-stack.7) + ; CHECK: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 8, $noreg :: (load 8 from %fixed-stack.7 + 8) + ; CHECK: [[MOV64rm2:%[0-9]+]]:gr64 = 
MOV64rm %fixed-stack.7, 1, $noreg, 16, $noreg :: (load 8 from %fixed-stack.7 + 16) + ; CHECK: [[MOV64rm3:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 24, $noreg :: (load 8 from %fixed-stack.7 + 24) + ; CHECK: [[MOV64rm4:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 32, $noreg :: (load 8 from %fixed-stack.7 + 32) + ; CHECK: [[MOV64rm5:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 40, $noreg :: (load 8 from %fixed-stack.7 + 40) + ; CHECK: [[MOV64rm6:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 48, $noreg :: (load 8 from %fixed-stack.7 + 48) + ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.9, 1, $noreg, 8, $noreg :: (load 4 from %fixed-stack.9 + 8, align 8) + ; CHECK: [[MOV64rm7:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.10, 1, $noreg, 0, $noreg :: (load 8 from %fixed-stack.10, align 16) + ; CHECK: [[MOV64rm8:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.9, 1, $noreg, 0, $noreg :: (load 8 from %fixed-stack.9) + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[MOV64rm8]] + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[MOV64rm7]] + ; CHECK: [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[COPY]] + ; CHECK: [[NOT64r3:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[NOT64r4:%[0-9]+]]:gr64 = NOT64r [[COPY2]] + ; CHECK: [[NOT64r5:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[NOT64r6:%[0-9]+]]:gr64 = NOT64r [[COPY4]] + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[MOV32rm1]] + ; CHECK: [[NOT64r7:%[0-9]+]]:gr64 = NOT64r [[MOV64rm6]] + ; CHECK: [[NOT64r8:%[0-9]+]]:gr64 = NOT64r [[MOV64rm5]] + ; CHECK: [[NOT64r9:%[0-9]+]]:gr64 = NOT64r [[MOV64rm4]] + ; CHECK: [[NOT64r10:%[0-9]+]]:gr64 = NOT64r [[MOV64rm3]] + ; CHECK: [[NOT64r11:%[0-9]+]]:gr64 = NOT64r [[MOV64rm2]] + ; CHECK: [[NOT64r12:%[0-9]+]]:gr64 = NOT64r [[MOV64rm1]] + ; CHECK: [[NOT64r13:%[0-9]+]]:gr64 = NOT64r [[MOV64rm]] + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[MOV32rm]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = 
AND64rr [[NOT64r6]], killed [[NOT64r13]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[NOT64r5]], killed [[NOT64r12]], implicit-def dead $eflags + ; CHECK: [[AND64rr2:%[0-9]+]]:gr64 = AND64rr [[NOT64r4]], killed [[NOT64r11]], implicit-def dead $eflags + ; CHECK: [[AND64rr3:%[0-9]+]]:gr64 = AND64rr [[NOT64r3]], killed [[NOT64r10]], implicit-def dead $eflags + ; CHECK: [[AND64rr4:%[0-9]+]]:gr64 = AND64rr [[NOT64r2]], killed [[NOT64r9]], implicit-def dead $eflags + ; CHECK: [[AND64rr5:%[0-9]+]]:gr64 = AND64rr [[NOT64r1]], killed [[NOT64r8]], implicit-def dead $eflags + ; CHECK: [[AND64rr6:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], killed [[NOT64r7]], implicit-def dead $eflags + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 48, $noreg, killed [[AND64rr6]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 40, $noreg, killed [[AND64rr5]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 32, $noreg, killed [[AND64rr4]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 24, $noreg, killed [[AND64rr3]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 16, $noreg, killed [[AND64rr2]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 8, $noreg, killed [[AND64rr1]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 0, $noreg, killed [[AND64rr]] :: (store 8) + ; CHECK: [[AND32ri:%[0-9]+]]:gr32 = AND32ri [[AND32rr]], 536870911, implicit-def dead $eflags + ; CHECK: MOV32mr [[COPY7]], 1, $noreg, 56, $noreg, killed [[AND32ri]] :: (store 4, align 8) + ; CHECK: $rax = COPY [[COPY6]] + ; CHECK: RET 0, $rax + %NotA = xor i477 %A, -1 + %NotB = xor i477 %B, -1 + %C = and i477 %NotA, %NotB + ret i477 %C +} + +; (~A & ~B) == ~(A | B) + +define i129 @demorgan_and_apint2(i129 %A, i129 %B) { + ; CHECK-LABEL: name: demorgan_and_apint2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx, $r8, $r9 + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $r9 + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $r8 + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rcx + ; 
CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY4:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY5:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[COPY6:%[0-9]+]]:gr32 = COPY [[COPY3]].sub_32bit + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY6]] + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY4]] + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY5]] + ; CHECK: [[COPY7:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY7]] + ; CHECK: [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[NOT64r3:%[0-9]+]]:gr64 = NOT64r [[COPY2]] + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r1]], killed [[NOT64r3]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], killed [[NOT64r2]], implicit-def dead $eflags + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr64 = INSERT_SUBREG [[DEF]], killed [[AND32rr]], %subreg.sub_32bit + ; CHECK: [[COPY8:%[0-9]+]]:gr32 = COPY [[INSERT_SUBREG]].sub_32bit + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[COPY8]], 1, implicit-def dead $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[AND32ri8_]], %subreg.sub_32bit + ; CHECK: $rax = COPY [[AND64rr]] + ; CHECK: $rdx = COPY [[AND64rr1]] + ; CHECK: $rcx = COPY [[SUBREG_TO_REG]] + ; CHECK: RET 0, $rax, $rdx, $rcx + %NotA = xor i129 %A, -1 + %NotB = xor i129 %B, -1 + %C = and i129 %NotA, %NotB + ret i129 %C +} + +; (~A & ~B) == ~(A | B) + +define i65 @demorgan_and_apint3(i65 %A, i65 %B) { + ; CHECK-LABEL: name: demorgan_and_apint3 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[XOR64ri8_:%[0-9]+]]:gr64 = XOR64ri8 [[COPY2]], 1, 
implicit-def dead $eflags + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[XOR64ri8_1:%[0-9]+]]:gr64 = XOR64ri8 [[COPY]], 1, implicit-def dead $eflags + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], killed [[NOT64r1]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[XOR64ri8_]], killed [[XOR64ri8_1]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[AND64rr]] + ; CHECK: $rdx = COPY [[AND64rr1]] + ; CHECK: RET 0, $rax, $rdx + %NotA = xor i65 %A, -1 + %NotB = xor i65 -1, %B + %C = and i65 %NotA, %NotB + ret i65 %C +} + +; (~A & ~B) == ~(A | B) + +define i66 @demorgan_and_apint4(i66 %A, i66 %B) { + ; CHECK-LABEL: name: demorgan_and_apint4 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[XOR64ri8_:%[0-9]+]]:gr64 = XOR64ri8 [[COPY2]], 3, implicit-def dead $eflags + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[XOR64ri8_1:%[0-9]+]]:gr64 = XOR64ri8 [[COPY]], 3, implicit-def dead $eflags + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], killed [[NOT64r1]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[XOR64ri8_]], killed [[XOR64ri8_1]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[AND64rr]] + ; CHECK: $rdx = COPY [[AND64rr1]] + ; CHECK: RET 0, $rax, $rdx + %NotA = xor i66 %A, -1 + %NotB = xor i66 %B, -1 + %C = and i66 %NotA, %NotB + ret i66 %C +} + +; (~A & ~B) == ~(A | B) + +define i47 @demorgan_and_apint5(i47 %A, i47 %B) { + ; CHECK-LABEL: name: demorgan_and_apint5 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: 
[[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 140737488355327 + ; CHECK: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY1]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[XOR64rr1:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[XOR64rr]], killed [[XOR64rr1]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[AND64rr]] + ; CHECK: RET 0, $rax + %NotA = xor i47 %A, -1 + %NotB = xor i47 %B, -1 + %C = and i47 %NotA, %NotB + ret i47 %C +} + +; This is confirming that 2 transforms work together: +; ~(~A & ~B) --> A | B + +define i32 @test3(i32 %A, i32 %B) { + ; CHECK-LABEL: name: test3 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[NOT32r2:%[0-9]+]]:gr32 = NOT32r [[AND32rr]] + ; CHECK: $eax = COPY [[NOT32r2]] + ; CHECK: RET 0, $eax + %nota = xor i32 %A, -1 + %notb = xor i32 %B, -1 + %c = and i32 %nota, %notb + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Invert a constant if needed: +; ~(~A & 5) --> A | ~5 + +define i32 @test4(i32 %A) { + ; CHECK-LABEL: name: test4 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[OR32ri8_:%[0-9]+]]:gr32 = OR32ri8 [[COPY]], -6, implicit-def dead $eflags + ; CHECK: $eax = COPY [[OR32ri8_]] + ; CHECK: RET 0, $eax + %nota = xor i32 %A, -1 + %c = and i32 %nota, 5 + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Test the mirror of DeMorgan's law with an extra 'not'. 
+; ~(~A | ~B) --> A & B + +define i32 @test5(i32 %A, i32 %B) { + ; CHECK-LABEL: name: test5 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[NOT32r2:%[0-9]+]]:gr32 = NOT32r [[OR32rr]] + ; CHECK: $eax = COPY [[NOT32r2]] + ; CHECK: RET 0, $eax + %nota = xor i32 %A, -1 + %notb = xor i32 %B, -1 + %c = or i32 %nota, %notb + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Repeat with weird types for extra coverage. +; ~(~A & ~B) --> A | B + +define i47 @test3_apint(i47 %A, i47 %B) { + ; CHECK-LABEL: name: test3_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 140737488355327 + ; CHECK: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY1]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[XOR64rr1:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[XOR64rr]], killed [[XOR64rr1]], implicit-def dead $eflags + ; CHECK: [[XOR64rr2:%[0-9]+]]:gr64 = XOR64rr [[AND64rr]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[XOR64rr2]] + ; CHECK: RET 0, $rax + %nota = xor i47 %A, -1 + %notb = xor i47 %B, -1 + %c = and i47 %nota, %notb + %notc = xor i47 %c, -1 + ret i47 %notc +} + +; ~(~A & 5) --> A | ~5 + +define i61 @test4_apint(i61 %A) { + ; CHECK-LABEL: name: test4_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF + ; 
CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr64 = INSERT_SUBREG [[DEF]], killed [[NOT32r]], %subreg.sub_32bit + ; CHECK: [[COPY2:%[0-9]+]]:gr32 = COPY [[INSERT_SUBREG]].sub_32bit + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[COPY2]], 5, implicit-def dead $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[AND32ri8_]], %subreg.sub_32bit + ; CHECK: $rax = COPY [[SUBREG_TO_REG]] + ; CHECK: RET 0, $rax + %nota = xor i61 %A, -1 + %c = and i61 %nota, 5 ; 5 = ~c2 + %notc = xor i61 %c, -1 + ret i61 %c +} + +; ~(~A | ~B) --> A & B + +define i71 @test5_apint(i71 %A, i71 %B) { + ; CHECK-LABEL: name: test5_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[XOR64ri8_:%[0-9]+]]:gr64 = XOR64ri8 [[COPY2]], 127, implicit-def dead $eflags + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[XOR64ri8_1:%[0-9]+]]:gr64 = XOR64ri8 [[COPY]], 127, implicit-def dead $eflags + ; CHECK: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[XOR64ri8_]], killed [[XOR64ri8_1]], implicit-def dead $eflags + ; CHECK: [[OR64rr1:%[0-9]+]]:gr64 = OR64rr [[NOT64r]], killed [[NOT64r1]], implicit-def dead $eflags + ; CHECK: [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[OR64rr1]] + ; CHECK: [[XOR64ri8_2:%[0-9]+]]:gr64 = XOR64ri8 [[OR64rr]], 127, implicit-def dead $eflags + ; CHECK: $rax = COPY [[NOT64r2]] + ; CHECK: $rdx = COPY [[XOR64ri8_2]] + ; CHECK: RET 0, $rax, $rdx + %nota = xor i71 %A, -1 + %notb = xor i71 %B, -1 + %c = or i71 %nota, %notb + %notc = xor i71 %c, -1 + ret i71 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i8 @demorgan_nand(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nand + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: 
[[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i7 @demorgan_nand_apint1(i7 %A, i7 %B) { + ; CHECK-LABEL: name: demorgan_nand_apint1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[XOR8ri:%[0-9]+]]:gr8 = XOR8ri [[COPY3]], 127, implicit-def dead $eflags + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[XOR8ri]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[XOR8ri1:%[0-9]+]]:gr8 = XOR8ri [[AND8rr]], 127, implicit-def dead $eflags + ; CHECK: $al = COPY [[XOR8ri1]] + ; CHECK: RET 0, $al + %nota = xor i7 %A, -1 + %c = and i7 %nota, %B + %notc = xor i7 %c, -1 + ret i7 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i117 @demorgan_nand_apint2(i117 %A, i117 %B) { + ; CHECK-LABEL: name: demorgan_nand_apint2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 9007199254740991 + ; CHECK: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY2]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], [[COPY1]], implicit-def dead 
$eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[XOR64rr]], [[COPY]], implicit-def dead $eflags + ; CHECK: [[XOR64rr1:%[0-9]+]]:gr64 = XOR64rr [[AND64rr1]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[AND64rr]] + ; CHECK: $rax = COPY [[NOT64r1]] + ; CHECK: $rdx = COPY [[XOR64rr1]] + ; CHECK: RET 0, $rax, $rdx + %nota = xor i117 %A, -1 + %c = and i117 %nota, %B + %notc = xor i117 %c, -1 + ret i117 %notc +} + +; ~(~A | B) --> (A & ~B) + +define i8 @demorgan_nor(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, -1 + %c = or i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? 
+ +define i8 @demorgan_nor_use2a(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2a + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[NOT8r]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY5]] + ; CHECK: RET 0, $al + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 23 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2a + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? 
+ +define i8 @demorgan_nor_use2b(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2b + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[COPY3]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY2]] + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], [[COPY3]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY5]] + ; CHECK: RET 0, $al + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2b + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? 
+ +define i8 @demorgan_nor_use2c(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2c + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[OR8rr]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY5]] + ; CHECK: RET 0, $al + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2c + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? 
+ +define i8 @demorgan_nor_use2ab(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2ab + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[COPY3]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY2]] + ; CHECK: [[MOV8ri1:%[0-9]+]]:gr8 = MOV8ri 17 + ; CHECK: $al = COPY [[NOT8r]] + ; CHECK: MUL8r killed [[MOV8ri1]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], [[COPY3]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY6:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY6]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY5]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY7:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY7]] + ; CHECK: RET 0, $al + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 17 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2b + %r2 = sdiv i8 %r1, %use2a + ret i8 %r2 +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? 
+ +define i8 @demorgan_nor_use2ac(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2ac + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 17 + ; CHECK: $al = COPY [[NOT8r]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[MOV8ri1:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[OR8rr]] + ; CHECK: MUL8r killed [[MOV8ri1]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY5]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY6:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY6]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY7:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY7]] + ; CHECK: RET 0, $al + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 17 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2c + %r2 = sdiv i8 %r1, %use2a + ret i8 %r2 +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? 
+ +define i8 @demorgan_nor_use2bc(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2bc + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[COPY3]] + ; CHECK: MUL8r [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY2]] + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], [[COPY3]], implicit-def dead $eflags + ; CHECK: $al = COPY [[OR8rr]] + ; CHECK: MUL8r [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY5]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY6:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY6]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY7:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY7]] + ; CHECK: RET 0, $al + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2c + %r2 = sdiv i8 %r1, %use2b + ret i8 %r2 +} + +; Do not apply DeMorgan's Law to constants. We prefer 'not' ops. 
+ +define i32 @demorganize_constant1(i32 %a) { + ; CHECK-LABEL: name: demorganize_constant1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[OR32ri8_:%[0-9]+]]:gr32 = OR32ri8 [[NOT32r]], -16, implicit-def dead $eflags + ; CHECK: $eax = COPY [[OR32ri8_]] + ; CHECK: RET 0, $eax + %and = and i32 %a, 15 + %and1 = xor i32 %and, -1 + ret i32 %and1 +} + +; Do not apply DeMorgan's Law to constants. We prefer 'not' ops. + +define i32 @demorganize_constant2(i32 %a) { + ; CHECK-LABEL: name: demorganize_constant2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[NOT32r]], -16, implicit-def dead $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, $eax + %and = or i32 %a, 15 + %and1 = xor i32 %and, -1 + ret i32 %and1 +} + +; PR22723: Recognize DeMorgan's Laws when obfuscated by zexts. 
+ +define i32 @demorgan_or_zext(i1 %X, i1 %Y) { + ; CHECK-LABEL: name: demorgan_or_zext + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[NOT32r1]], killed [[NOT32r]], implicit-def dead $eflags + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[OR32rr]], 1, implicit-def dead $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, $eax + %zextX = zext i1 %X to i32 + %zextY = zext i1 %Y to i32 + %notX = xor i32 %zextX, 1 + %notY = xor i32 %zextY, 1 + %or = or i32 %notX, %notY + ret i32 %or +} + +define i32 @demorgan_and_zext(i1 %X, i1 %Y) { + ; CHECK-LABEL: name: demorgan_and_zext + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r1]], killed [[NOT32r]], implicit-def dead $eflags + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[AND32rr]], 1, implicit-def dead $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, $eax + %zextX = zext i1 %X to i32 + %zextY = zext i1 %Y to i32 + %notX = xor i32 %zextX, 1 + %notY = xor i32 %zextY, 1 + %and = and i32 %notX, %notY + ret i32 %and +} + +define <2 x i32> @demorgan_or_zext_vec(<2 x i1> %X, <2 x i1> %Y) { + ; CHECK-LABEL: name: demorgan_or_zext_vec + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $xmm0, $xmm1 + ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1 + ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0 + ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES + ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[COPY1]], [[V_SETALLONES]] + ; CHECK: [[PXORrr1:%[0-9]+]]:vr128 = PXORrr [[COPY]], 
[[V_SETALLONES]] + ; CHECK: [[PORrr:%[0-9]+]]:vr128 = PORrr [[PXORrr]], killed [[PXORrr1]] + ; CHECK: [[PANDrm:%[0-9]+]]:vr128 = PANDrm [[PORrr]], $rip, 1, $noreg, %const.0, $noreg :: (load 16 from constant-pool) + ; CHECK: $xmm0 = COPY [[PANDrm]] + ; CHECK: RET 0, $xmm0 + %zextX = zext <2 x i1> %X to <2 x i32> + %zextY = zext <2 x i1> %Y to <2 x i32> + %notX = xor <2 x i32> %zextX, <i32 1, i32 1> + %notY = xor <2 x i32> %zextY, <i32 1, i32 1> + %or = or <2 x i32> %notX, %notY + ret <2 x i32> %or +} + +define <2 x i32> @demorgan_and_zext_vec(<2 x i1> %X, <2 x i1> %Y) { + ; CHECK-LABEL: name: demorgan_and_zext_vec + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $xmm0, $xmm1 + ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1 + ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0 + ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES + ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[COPY]], killed [[V_SETALLONES]] + ; CHECK: [[PANDNrr:%[0-9]+]]:vr128 = PANDNrr [[COPY1]], killed [[PXORrr]] + ; CHECK: [[PANDrm:%[0-9]+]]:vr128 = PANDrm [[PANDNrr]], $rip, 1, $noreg, %const.0, $noreg :: (load 16 from constant-pool) + ; CHECK: $xmm0 = COPY [[PANDrm]] + ; CHECK: RET 0, $xmm0 + %zextX = zext <2 x i1> %X to <2 x i32> + %zextY = zext <2 x i1> %Y to <2 x i32> + %notX = xor <2 x i32> %zextX, <i32 1, i32 1> + %notY = xor <2 x i32> %zextY, <i32 1, i32 1> + %and = and <2 x i32> %notX, %notY + ret <2 x i32> %and +} + +define i32 @PR28476(i32 %x, i32 %y) { + ; CHECK-LABEL: name: PR28476 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: TEST32rr [[COPY1]], [[COPY1]], implicit-def $eflags + ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags + ; CHECK: TEST32rr [[COPY]], [[COPY]], implicit-def $eflags + ; CHECK: [[SETNEr1:%[0-9]+]]:gr8 = SETNEr implicit $eflags + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETNEr]], killed [[SETNEr1]], implicit-def dead $eflags + ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 killed [[AND8rr]] + ; 
CHECK: [[XOR32ri8_:%[0-9]+]]:gr32 = XOR32ri8 [[MOVZX32rr8_]], 1, implicit-def dead $eflags + ; CHECK: $eax = COPY [[XOR32ri8_]] + ; CHECK: RET 0, $eax + %cmp0 = icmp ne i32 %x, 0 + %cmp1 = icmp ne i32 %y, 0 + %and = and i1 %cmp0, %cmp1 + %zext = zext i1 %and to i32 + %cond = xor i32 %zext, 1 + ret i32 %cond +} + +; ~(~(a | b) | (a & b)) --> (a | b) & ~(a & b) -> a ^ b + +define i32 @demorgan_plus_and_to_xor(i32 %a, i32 %b) { + ; CHECK-LABEL: name: demorgan_plus_and_to_xor + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[COPY]], [[COPY1]], implicit-def dead $eflags + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[OR32rr]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY]], [[COPY1]], implicit-def dead $eflags + ; CHECK: [[OR32rr1:%[0-9]+]]:gr32 = OR32rr [[AND32rr]], killed [[NOT32r]], implicit-def dead $eflags + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[OR32rr1]] + ; CHECK: $eax = COPY [[NOT32r1]] + ; CHECK: RET 0, $eax + %or = or i32 %b, %a + %notor = xor i32 %or, -1 + %and = and i32 %b, %a + %or2 = or i32 %and, %notor + %not = xor i32 %or2, -1 + ret i32 %not +} + +define <4 x i32> @demorgan_plus_and_to_xor_vec(<4 x i32> %a, <4 x i32> %b) { + ; CHECK-LABEL: name: demorgan_plus_and_to_xor_vec + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $xmm0, $xmm1 + ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1 + ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0 + ; CHECK: [[PORrr:%[0-9]+]]:vr128 = PORrr [[COPY1]], [[COPY]] + ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES + ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[PORrr]], [[V_SETALLONES]] + ; CHECK: [[PANDrr:%[0-9]+]]:vr128 = PANDrr [[COPY1]], [[COPY]] + ; CHECK: [[PORrr1:%[0-9]+]]:vr128 = PORrr [[PANDrr]], killed [[PXORrr]] + ; CHECK: [[PXORrr1:%[0-9]+]]:vr128 = PXORrr [[PORrr1]], [[V_SETALLONES]] + ; CHECK: $xmm0 = COPY [[PXORrr1]] + ; CHECK: RET 0, $xmm0 + %or = 
or <4 x i32> %a, %b + %notor = xor <4 x i32> %or, < i32 -1, i32 -1, i32 -1, i32 -1 > + %and = and <4 x i32> %a, %b + %or2 = or <4 x i32> %and, %notor + %not = xor <4 x i32> %or2, < i32 -1, i32 -1, i32 -1, i32 -1 > + ret <4 x i32> %not +} Index: test/Transforms/InstCombine/demorgan-extra.ll =================================================================== --- /dev/null +++ test/Transforms/InstCombine/demorgan-extra.ll @@ -0,0 +1,132 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -instcombine -S | FileCheck %s + +; There is an identical twin test in test/CodeGen/MIR/Generic/demorgan-extra.ll +; Please keep them in sync! :) + +declare void @use8(i8) +declare i8 @gen8() + +; ============================================================================ ; + +; ~(~A & B) --> (A | ~B) + +define i8 @demorgan_nand(i8 %A, i8 %B) { +; CHECK-LABEL: @demorgan_nand( +; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B:%.*]], -1 +; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[B_NOT]], [[A:%.*]] +; CHECK-NEXT: ret i8 [[NOTC]] +; + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_commutative(i8 %A) { +; CHECK-LABEL: @demorgan_nand_commutative( +; CHECK-NEXT: [[B:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B]], -1 +; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[B_NOT]], [[A:%.*]] +; CHECK-NEXT: ret i8 [[NOTC]] +; + %B = call i8 @gen8() + %notx = xor i8 %A, -1 + %c = and i8 %B, %notx ; swapped + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_extraxor(i8 %A) { +; CHECK-LABEL: @demorgan_nand_extraxor( +; CHECK-NEXT: [[V:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[Z:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[B:%.*]] = xor i8 [[V]], [[Z]] +; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B]], -1 +; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[B_NOT]], [[A:%.*]] +; CHECK-NEXT: ret i8 [[NOTC]] +; + %V = call i8 @gen8() + %Z = call i8 @gen8() + %B = xor i8 %V, %Z ; not with -1 + %notx = xor i8 
%A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_extraxor_commutative(i8 %A) { +; CHECK-LABEL: @demorgan_nand_extraxor_commutative( +; CHECK-NEXT: [[V:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[Z:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[B:%.*]] = xor i8 [[V]], [[Z]] +; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B]], -1 +; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[B_NOT]], [[A:%.*]] +; CHECK-NEXT: ret i8 [[NOTC]] +; + %V = call i8 @gen8() + %Z = call i8 @gen8() + %B = xor i8 %V, %Z ; not with -1 + %notx = xor i8 %A, -1 + %c = and i8 %B, %notx ; swapped + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_badxor0(i8 %A, i8 %B) { +; CHECK-LABEL: @demorgan_nand_badxor0( +; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[A:%.*]], 1 +; CHECK-NEXT: [[C:%.*]] = and i8 [[NOTX]], [[B:%.*]] +; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1 +; CHECK-NEXT: ret i8 [[NOTC]] +; + %notx = xor i8 %A, 1 ; not -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +define i8 @demorgan_nand_badxor1(i8 %A, i8 %B) { +; CHECK-LABEL: @demorgan_nand_badxor1( +; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[A:%.*]], -1 +; CHECK-NEXT: [[C:%.*]] = and i8 [[NOTX]], [[B:%.*]] +; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], 1 +; CHECK-NEXT: ret i8 [[NOTC]] +; + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, 1 ; not -1 + ret i8 %notc +} + +define i8 @demorgan_nand_oneuse0(i8 %A, i8 %B) { +; CHECK-LABEL: @demorgan_nand_oneuse0( +; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[A:%.*]], -1 +; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B:%.*]], -1 +; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[B_NOT]], [[A]] +; CHECK-NEXT: call void @use8(i8 [[NOTX]]) +; CHECK-NEXT: ret i8 [[NOTC]] +; + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + call void @use8(i8 %notx) + ret i8 %notc +} + +define i8 @demorgan_nand_oneuse1(i8 %A, i8 %B) { +; CHECK-LABEL: @demorgan_nand_oneuse1( +; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[A:%.*]], -1 +; CHECK-NEXT: [[C:%.*]] 
= and i8 [[NOTX]], [[B:%.*]] +; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1 +; CHECK-NEXT: call void @use8(i8 [[C]]) +; CHECK-NEXT: ret i8 [[NOTC]] +; + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + call void @use8(i8 %c) + ret i8 %notc +} + +; ============================================================================ ; Index: test/Transforms/InstCombine/demorgan.ll =================================================================== --- test/Transforms/InstCombine/demorgan.ll +++ test/Transforms/InstCombine/demorgan.ll @@ -1,6 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -instcombine -S | FileCheck %s +; There is an identical twin test in test/CodeGen/MIR/Generic/demorgan.ll +; Please keep them in sync! :) + ; (~A | ~B) == ~(A & B) define i43 @demorgan_or_apint1(i43 %A, i43 %B) {