Index: test/CodeGen/AArch64/demorgan-extra.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/demorgan-extra.ll
@@ -0,0 +1,244 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu -stop-after=machine-combiner -o - %s | FileCheck %s
+
+; There is an identical twin test in test/Transforms/InstCombine/demorgan-extra.ll
+; Please keep them in sync! :)
+
+declare void @use32(i32)
+declare i32 @gen32()
+
+; ============================================================================ ;
+
+; ~(~A & B) --> (A | ~B)
+
+define i32 @demorgan_nand(i32 %A, i32 %B) {
+  ; CHECK-LABEL: name: demorgan_nand
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY1]], [[COPY]]
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr killed [[ANDWrr]], [[COPY]]
+  ; CHECK: $w0 = COPY [[ORNWrr]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %notx = xor i32 %A, -1
+  %c = and i32 %notx, %B
+  %notc = xor i32 %c, -1
+  ret i32 %notc
+}
+
+define <2 x i32> @demorgan_nand_vec(<2 x i32> %A, <2 x i32> %B) {
+  ; CHECK-LABEL: name: demorgan_nand_vec
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $d0, $d1
+  ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+  ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+  ; CHECK: [[BICv8i8_:%[0-9]+]]:fpr64 = BICv8i8 [[COPY]], [[COPY1]]
+  ; CHECK: [[NOTv8i8_:%[0-9]+]]:fpr64 = NOTv8i8 killed [[BICv8i8_]]
+  ; CHECK: $d0 = COPY [[NOTv8i8_]]
+  ; CHECK: RET_ReallyLR implicit $d0
+  %notx = xor <2 x i32> %A, <i32 -1, i32 -1>
+  %c = and <2 x i32> %notx, %B
+  %notc = xor <2 x i32> %c, <i32 -1, i32 -1>
+  ret <2 x i32> %notc
+}
+
+define <3 x i32> @demorgan_nand_vec_undef(<3 x i32> %A, <3 x i32> %B) {
+  ; CHECK-LABEL: name: demorgan_nand_vec_undef
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $q0, $q1
+  ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+  ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+  ; CHECK: [[BICv16i8_:%[0-9]+]]:fpr128 = BICv16i8 [[COPY]], [[COPY1]]
+  ; CHECK: [[NOTv16i8_:%[0-9]+]]:fpr128 = NOTv16i8 killed [[BICv16i8_]]
+  ; CHECK: $q0 = COPY [[NOTv16i8_]]
+  ; CHECK: RET_ReallyLR implicit $q0
+  %notx = xor <3 x i32> %A, <i32 -1, i32 undef, i32 -1>
+  %c = and <3 x i32> %notx, %B
+  %notc = xor <3 x i32> %c, <i32 -1, i32 undef, i32 -1>
+  ret <3 x i32> %notc
+}
+
+define <4 x i32> @demorgan_nand_vec_128bit(<4 x i32> %A, <4 x i32> %B) {
+  ; CHECK-LABEL: name: demorgan_nand_vec_128bit
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $q0, $q1
+  ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+  ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+  ; CHECK: [[BICv16i8_:%[0-9]+]]:fpr128 = BICv16i8 [[COPY]], [[COPY1]]
+  ; CHECK: [[NOTv16i8_:%[0-9]+]]:fpr128 = NOTv16i8 killed [[BICv16i8_]]
+  ; CHECK: $q0 = COPY [[NOTv16i8_]]
+  ; CHECK: RET_ReallyLR implicit $q0
+  %notx = xor <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %c = and <4 x i32> %notx, %B
+  %notc = xor <4 x i32> %c, <i32 -1, i32 -1, i32 -1, i32 -1>
+  ret <4 x i32> %notc
+}
+
+define <4 x i32> @demorgan_nand_vec_128bit_undef(<4 x i32> %A, <4 x i32> %B) {
+  ; CHECK-LABEL: name: demorgan_nand_vec_128bit_undef
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $q0, $q1
+  ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+  ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+  ; CHECK: [[BICv16i8_:%[0-9]+]]:fpr128 = BICv16i8 [[COPY]], [[COPY1]]
+  ; CHECK: [[NOTv16i8_:%[0-9]+]]:fpr128 = NOTv16i8 killed [[BICv16i8_]]
+  ; CHECK: $q0 = COPY [[NOTv16i8_]]
+  ; CHECK: RET_ReallyLR implicit $q0
+  %notx = xor <4 x i32> %A, <i32 -1, i32 undef, i32 -1, i32 -1>
+  %c = and <4 x i32> %notx, %B
+  %notc = xor <4 x i32> %c, <i32 -1, i32 undef, i32 -1, i32 -1>
+  ret <4 x i32> %notc
+}
+
+define i32 @demorgan_nand_commutative(i32 %A) {
+  ; CHECK-LABEL: name: demorgan_nand_commutative
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: BL @gen32, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $w0
+  ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY]], [[COPY1]]
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr killed [[ANDWrr]], [[COPY1]]
+  ; CHECK: $w0 = COPY [[ORNWrr]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %B = call i32 @gen32()
+  %notx = xor i32 %A, -1
+  %c = and i32 %B, %notx ; swapped
+  %notc = xor i32 %c, -1
+  ret i32 %notc
+}
+
+define i32 @demorgan_nand_extraxor(i32 %A) {
+  ; CHECK-LABEL: name: demorgan_nand_extraxor
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: BL @gen32, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $w0
+  ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: BL @gen32, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $w0
+  ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[EORWrr:%[0-9]+]]:gpr32 = EORWrr [[COPY1]], [[COPY2]]
+  ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY]], [[EORWrr]]
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr killed [[ANDWrr]], [[EORWrr]]
+  ; CHECK: $w0 = COPY [[ORNWrr]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %V = call i32 @gen32()
+  %Z = call i32 @gen32()
+  %B = xor i32 %V, %Z ; not with -1
+  %notx = xor i32 %A, -1
+  %c = and i32 %notx, %B
+  %notc = xor i32 %c, -1
+  ret i32 %notc
+}
+
+define i32 @demorgan_nand_extraxor_commutative(i32 %A) {
+  ; CHECK-LABEL: name: demorgan_nand_extraxor_commutative
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: BL @gen32, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $w0
+  ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: BL @gen32, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $w0
+  ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[EORWrr:%[0-9]+]]:gpr32 = EORWrr [[COPY1]], [[COPY2]]
+  ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY]], [[EORWrr]]
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr killed [[ANDWrr]], [[EORWrr]]
+  ; CHECK: $w0 = COPY [[ORNWrr]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %V = call i32 @gen32()
+  %Z = call i32 @gen32()
+  %B = xor i32 %V, %Z ; not with -1
+  %notx = xor i32 %A, -1
+  %c = and i32 %B, %notx ; swapped
+  %notc = xor i32 %c, -1
+  ret i32 %notc
+}
+
+define i32 @demorgan_nand_badxor0(i32 %A, i32 %B) {
+  ; CHECK-LABEL: name: demorgan_nand_badxor0
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[EORWri:%[0-9]+]]:gpr32common = EORWri [[COPY1]], 0
+  ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr killed [[EORWri]], [[COPY]]
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[ANDWrr]]
+  ; CHECK: $w0 = COPY [[ORNWrr]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %notx = xor i32 %A, 1 ; not -1
+  %c = and i32 %notx, %B
+  %notc = xor i32 %c, -1
+  ret i32 %notc
+}
+
+define i32 @demorgan_nand_badxor1(i32 %A, i32 %B) {
+  ; CHECK-LABEL: name: demorgan_nand_badxor1
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[BICWrr:%[0-9]+]]:gpr32 = BICWrr [[COPY]], [[COPY1]]
+  ; CHECK: [[EORWri:%[0-9]+]]:gpr32sp = EORWri killed [[BICWrr]], 0
+  ; CHECK: $w0 = COPY [[EORWri]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %notx = xor i32 %A, -1
+  %c = and i32 %notx, %B
+  %notc = xor i32 %c, 1 ; not -1
+  ret i32 %notc
+}
+
+define i32 @demorgan_nand_oneuse0(i32 %A, i32 %B) {
+  ; CHECK-LABEL: name: demorgan_nand_oneuse0
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY1]]
+  ; CHECK: [[BICWrr:%[0-9]+]]:gpr32 = BICWrr [[COPY]], [[COPY1]]
+  ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[BICWrr]]
+  ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: $w0 = COPY [[ORNWrr]]
+  ; CHECK: BL @use32, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $w0, implicit-def $sp
+  ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: $w0 = COPY [[ORNWrr1]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %notx = xor i32 %A, -1
+  %c = and i32 %notx, %B
+  %notc = xor i32 %c, -1
+  call void @use32(i32 %notx)
+  ret i32 %notc
+}
+
+define i32 @demorgan_nand_oneuse1(i32 %A, i32 %B) {
+  ; CHECK-LABEL: name: demorgan_nand_oneuse1
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[BICWrr:%[0-9]+]]:gpr32 = BICWrr [[COPY]], [[COPY1]]
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[BICWrr]]
+  ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: $w0 = COPY [[BICWrr]]
+  ; CHECK: BL @use32, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $w0, implicit-def $sp
+  ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+  ; CHECK: $w0 = COPY [[ORNWrr]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %notx = xor i32 %A, -1
+  %c = and i32 %notx, %B
+  %notc = xor i32 %c, -1
+  call void @use32(i32 %c)
+  ret i32 %notc
+}
+
+; ============================================================================ ;
Index: test/CodeGen/AArch64/demorgan.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/demorgan.ll
@@ -0,0 +1,766 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu -stop-after=machine-combiner -o - %s | FileCheck %s
+
+; There is an identical twin test in test/Transforms/InstCombine/demorgan.ll
+; Please keep them in sync!
:) + +; (~A | ~B) == ~(A & B) + +define i43 @demorgan_or_apint1(i43 %A, i43 %B) { + ; CHECK-LABEL: name: demorgan_or_apint1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[EORXri:%[0-9]+]]:gpr64common = EORXri [[COPY1]], 4138 + ; CHECK: [[EORXri1:%[0-9]+]]:gpr64common = EORXri [[COPY]], 4138 + ; CHECK: [[ORRXrr:%[0-9]+]]:gpr64 = ORRXrr killed [[EORXri]], killed [[EORXri1]] + ; CHECK: $x0 = COPY [[ORRXrr]] + ; CHECK: RET_ReallyLR implicit $x0 + %NotA = xor i43 %A, -1 + %NotB = xor i43 %B, -1 + %C = or i43 %NotA, %NotB + ret i43 %C +} + +; (~A | ~B) == ~(A & B) + +define i129 @demorgan_or_apint2(i129 %A, i129 %B) { + ; CHECK-LABEL: name: demorgan_or_apint2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1, $x2, $x4, $x5, $x6 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x6 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x5 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $x4 + ; CHECK: [[COPY3:%[0-9]+]]:gpr64 = COPY $x2 + ; CHECK: [[COPY4:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY5:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY6:%[0-9]+]]:gpr32 = COPY [[COPY3]].sub_32 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[COPY6]] + ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY4]] + ; CHECK: [[ORNXrr1:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY5]] + ; CHECK: [[COPY7:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32 + ; CHECK: [[ORNXrr2:%[0-9]+]]:gpr64 = ORNXrr killed [[ORNXrr1]], [[COPY2]] + ; CHECK: [[ORNXrr3:%[0-9]+]]:gpr64 = ORNXrr killed [[ORNXrr]], [[COPY1]] + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr killed [[ORNWrr]], killed [[COPY7]] + ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], killed [[ORNWrr1]], %subreg.sub_32 + ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri killed [[INSERT_SUBREG]], 4096 + ; CHECK: $x0 = COPY [[ORNXrr2]] + ; CHECK: $x1 = COPY [[ORNXrr3]] + ; CHECK: $x2 = COPY [[ANDXri]] + ; CHECK: RET_ReallyLR implicit $x0, implicit $x1, implicit $x2 + %NotA = xor i129 %A, -1 + %NotB = xor i129 %B, -1 + %C = or i129 %NotA, %NotB + ret i129 %C +} + +; (~A & ~B) == ~(A | B) + +define i477 @demorgan_and_apint1(i477 %A, i477 %B) { + ; CHECK-LABEL: name: demorgan_and_apint1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x7 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x6 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $x5 + ; CHECK: [[COPY3:%[0-9]+]]:gpr64 = COPY $x4 + ; CHECK: [[COPY4:%[0-9]+]]:gpr64 = COPY $x3 + ; CHECK: [[COPY5:%[0-9]+]]:gpr64 = COPY $x2 + ; CHECK: [[COPY6:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY7:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui %fixed-stack.1, 0 :: (load 8 from %fixed-stack.1, align 16) + ; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui %fixed-stack.2, 0 :: (load 8 from %fixed-stack.2) + ; CHECK: [[LDRXui2:%[0-9]+]]:gpr64 = LDRXui %fixed-stack.3, 0 :: (load 8 from %fixed-stack.3, align 16) + ; CHECK: [[LDRXui3:%[0-9]+]]:gpr64 = LDRXui %fixed-stack.4, 0 :: (load 8 from %fixed-stack.4) + ; CHECK: [[LDRXui4:%[0-9]+]]:gpr64 = LDRXui %fixed-stack.5, 0 :: (load 8 from %fixed-stack.5, align 16) + ; CHECK: [[LDRXui5:%[0-9]+]]:gpr64 = LDRXui %fixed-stack.6, 0 :: (load 8 from %fixed-stack.6) + ; CHECK: [[LDRXui6:%[0-9]+]]:gpr64 = LDRXui %fixed-stack.7, 0 :: (load 8 from %fixed-stack.7, align 16) + ; CHECK: [[LDRXui7:%[0-9]+]]:gpr64 = LDRXui %fixed-stack.0, 0 :: (load 8 from 
%fixed-stack.0) + ; CHECK: [[EORXri:%[0-9]+]]:gpr64common = EORXri [[COPY]], 4124 + ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY1]] + ; CHECK: [[ORNXrr1:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY2]] + ; CHECK: [[ORNXrr2:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY3]] + ; CHECK: [[ORNXrr3:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY4]] + ; CHECK: [[ORNXrr4:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY5]] + ; CHECK: [[ORNXrr5:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY6]] + ; CHECK: [[ORNXrr6:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY7]] + ; CHECK: [[EORXri1:%[0-9]+]]:gpr64common = EORXri killed [[LDRXui7]], 4124 + ; CHECK: [[BICXrr:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr6]], killed [[LDRXui6]] + ; CHECK: [[BICXrr1:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr5]], killed [[LDRXui5]] + ; CHECK: [[BICXrr2:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr4]], killed [[LDRXui4]] + ; CHECK: [[BICXrr3:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr3]], killed [[LDRXui3]] + ; CHECK: [[BICXrr4:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr2]], killed [[LDRXui2]] + ; CHECK: [[BICXrr5:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr1]], killed [[LDRXui1]] + ; CHECK: [[BICXrr6:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr]], killed [[LDRXui]] + ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr killed [[EORXri]], killed [[EORXri1]] + ; CHECK: $x0 = COPY [[BICXrr]] + ; CHECK: $x1 = COPY [[BICXrr1]] + ; CHECK: $x2 = COPY [[BICXrr2]] + ; CHECK: $x3 = COPY [[BICXrr3]] + ; CHECK: $x4 = COPY [[BICXrr4]] + ; CHECK: $x5 = COPY [[BICXrr5]] + ; CHECK: $x6 = COPY [[BICXrr6]] + ; CHECK: $x7 = COPY [[ANDXrr]] + ; CHECK: RET_ReallyLR implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7 + %NotA = xor i477 %A, -1 + %NotB = xor i477 %B, -1 + %C = and i477 %NotA, %NotB + ret i477 %C +} + +; (~A & ~B) == ~(A | B) + +define i129 @demorgan_and_apint2(i129 %A, i129 %B) { + ; CHECK-LABEL: name: demorgan_and_apint2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1, $x2, $x4, $x5, $x6 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x6 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x5 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $x4 + ; CHECK: [[COPY3:%[0-9]+]]:gpr64 = COPY $x2 + ; CHECK: [[COPY4:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY5:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY6:%[0-9]+]]:gpr32 = COPY [[COPY3]].sub_32 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[COPY6]] + ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY4]] + ; CHECK: [[ORNXrr1:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY5]] + ; CHECK: [[COPY7:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32 + ; CHECK: [[BICXrr:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr1]], [[COPY2]] + ; CHECK: [[BICXrr1:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr]], [[COPY1]] + ; CHECK: [[BICWrr:%[0-9]+]]:gpr32 = BICWrr killed [[ORNWrr]], killed [[COPY7]] + ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], killed [[BICWrr]], %subreg.sub_32 + ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri killed [[INSERT_SUBREG]], 4096 + ; CHECK: $x0 = COPY [[BICXrr]] + ; CHECK: $x1 = COPY [[BICXrr1]] + ; CHECK: $x2 = COPY [[ANDXri]] + ; CHECK: RET_ReallyLR implicit $x0, implicit $x1, implicit $x2 + %NotA = xor i129 %A, -1 + %NotB = xor i129 %B, -1 + %C = and i129 %NotA, %NotB + ret i129 %C +} + +; (~A & ~B) == ~(A | B) + +define i65 @demorgan_and_apint3(i65 %A, i65 %B) { + ; CHECK-LABEL: name: demorgan_and_apint3 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1, $x2, $x3 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x3 + ; CHECK: 
[[COPY1:%[0-9]+]]:gpr64 = COPY $x2 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY3:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[EORXri:%[0-9]+]]:gpr64common = EORXri [[COPY2]], 4096 + ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY3]] + ; CHECK: [[EORXri1:%[0-9]+]]:gpr64common = EORXri [[COPY]], 4096 + ; CHECK: [[BICXrr:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr]], [[COPY1]] + ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr killed [[EORXri]], killed [[EORXri1]] + ; CHECK: $x0 = COPY [[BICXrr]] + ; CHECK: $x1 = COPY [[ANDXrr]] + ; CHECK: RET_ReallyLR implicit $x0, implicit $x1 + %NotA = xor i65 %A, -1 + %NotB = xor i65 -1, %B + %C = and i65 %NotA, %NotB + ret i65 %C +} + +; (~A & ~B) == ~(A | B) + +define i66 @demorgan_and_apint4(i66 %A, i66 %B) { + ; CHECK-LABEL: name: demorgan_and_apint4 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1, $x2, $x3 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x3 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x2 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY3:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[EORXri:%[0-9]+]]:gpr64common = EORXri [[COPY2]], 4097 + ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY3]] + ; CHECK: [[EORXri1:%[0-9]+]]:gpr64common = EORXri [[COPY]], 4097 + ; CHECK: [[BICXrr:%[0-9]+]]:gpr64 = BICXrr killed [[ORNXrr]], [[COPY1]] + ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr killed [[EORXri]], killed [[EORXri1]] + ; CHECK: $x0 = COPY [[BICXrr]] + ; CHECK: $x1 = COPY [[ANDXrr]] + ; CHECK: RET_ReallyLR implicit $x0, implicit $x1 + %NotA = xor i66 %A, -1 + %NotB = xor i66 %B, -1 + %C = and i66 %NotA, %NotB + ret i66 %C +} + +; (~A & ~B) == ~(A | B) + +define i47 @demorgan_and_apint5(i47 %A, i47 %B) { + ; CHECK-LABEL: name: demorgan_and_apint5 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[EORXri:%[0-9]+]]:gpr64common = EORXri [[COPY1]], 4142 + ; CHECK: [[EORXri1:%[0-9]+]]:gpr64common = EORXri [[COPY]], 4142 + ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr killed [[EORXri]], killed [[EORXri1]] + ; CHECK: $x0 = COPY [[ANDXrr]] + ; CHECK: RET_ReallyLR implicit $x0 + %NotA = xor i47 %A, -1 + %NotB = xor i47 %B, -1 + %C = and i47 %NotA, %NotB + ret i47 %C +} + +; This is confirming that 2 transforms work together: +; ~(~A & ~B) --> A | B + +define i32 @test3(i32 %A, i32 %B) { + ; CHECK-LABEL: name: test3 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[BICWrr:%[0-9]+]]:gpr32 = BICWrr [[COPY1]], [[COPY]] + ; CHECK: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr killed [[BICWrr]], [[COPY]] + ; CHECK: $w0 = COPY [[ORRWrr]] + ; CHECK: RET_ReallyLR implicit $w0 + %nota = xor i32 %A, -1 + %notb = xor i32 %B, -1 + %c = and i32 %nota, %notb + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Invert a constant if needed: +; ~(~A & 5) --> A | ~5 + +define i32 @test4(i32 %A) { + ; CHECK-LABEL: name: test4 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -6 + ; CHECK: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[COPY]], killed [[MOVi32imm]] + ; CHECK: $w0 = COPY [[ORRWrr]] + ; CHECK: RET_ReallyLR implicit $w0 + %nota = xor i32 %A, -1 + %c = and i32 %nota, 5 + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Test the mirror of DeMorgan's law with an extra 'not'. 
+; ~(~A | ~B) --> A & B + +define i32 @test5(i32 %A, i32 %B) { + ; CHECK-LABEL: name: test5 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY1]] + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr killed [[ORNWrr]], [[COPY]] + ; CHECK: [[ORNWrr2:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[ORNWrr1]] + ; CHECK: $w0 = COPY [[ORNWrr2]] + ; CHECK: RET_ReallyLR implicit $w0 + %nota = xor i32 %A, -1 + %notb = xor i32 %B, -1 + %c = or i32 %nota, %notb + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Repeat with weird types for extra coverage. +; ~(~A & ~B) --> A | B + +define i47 @test3_apint(i47 %A, i47 %B) { + ; CHECK-LABEL: name: test3_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[EORXri:%[0-9]+]]:gpr64common = EORXri [[COPY]], 4142 + ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr [[COPY1]], killed [[EORXri]] + ; CHECK: [[ORRXrr:%[0-9]+]]:gpr64 = ORRXrr killed [[ANDXrr]], [[COPY]] + ; CHECK: $x0 = COPY [[ORRXrr]] + ; CHECK: RET_ReallyLR implicit $x0 + %nota = xor i47 %A, -1 + %notb = xor i47 %B, -1 + %c = and i47 %nota, %notb + %notc = xor i47 %c, -1 + ret i47 %notc +} + +; ~(~A & 5) --> A | ~5 + +define i61 @test4_apint(i61 %A) { + ; CHECK-LABEL: name: test4_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[COPY1]] + ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], killed [[ORNWrr]], %subreg.sub_32 + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 5 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, killed [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr killed [[INSERT_SUBREG]], killed [[SUBREG_TO_REG]] + ; CHECK: $x0 = COPY [[ANDXrr]] + ; CHECK: RET_ReallyLR implicit $x0 + %nota = xor i61 %A, -1 + %c = and i61 %nota, 5 ; 5 = ~c2 + %notc = xor i61 %c, -1 + ret i61 %c +} + +; ~(~A | ~B) --> A & B + +define i71 @test5_apint(i71 %A, i71 %B) { + ; CHECK-LABEL: name: test5_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1, $x2, $x3 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x3 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x2 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY3:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY3]] + ; CHECK: [[EORXri:%[0-9]+]]:gpr64common = EORXri [[COPY2]], 4102 + ; CHECK: [[EORXri1:%[0-9]+]]:gpr64common = EORXri [[COPY]], 4102 + ; CHECK: [[ORRXrr:%[0-9]+]]:gpr64 = ORRXrr killed [[EORXri]], killed [[EORXri1]] + ; CHECK: [[ORNXrr1:%[0-9]+]]:gpr64 = ORNXrr killed [[ORNXrr]], [[COPY1]] + ; CHECK: [[ORNXrr2:%[0-9]+]]:gpr64 = ORNXrr $xzr, killed [[ORNXrr1]] + ; CHECK: [[EORXri2:%[0-9]+]]:gpr64sp = EORXri killed [[ORRXrr]], 4102 + ; CHECK: $x0 = COPY [[ORNXrr2]] + ; CHECK: $x1 = COPY [[EORXri2]] + ; CHECK: RET_ReallyLR implicit $x0, implicit $x1 + %nota = xor i71 %A, -1 + %notb = xor i71 %B, -1 + %c = or i71 %nota, %notb + %notc = xor i71 %c, -1 + ret i71 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i8 @demorgan_nand(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nand + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 
+ ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY1]], [[COPY]] + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr killed [[ANDWrr]], [[COPY]] + ; CHECK: $w0 = COPY [[ORNWrr]] + ; CHECK: RET_ReallyLR implicit $w0 + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i7 @demorgan_nand_apint1(i7 %A, i7 %B) { + ; CHECK-LABEL: name: demorgan_nand_apint1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY1]], [[COPY]] + ; CHECK: [[EORWri:%[0-9]+]]:gpr32common = EORWri [[COPY]], 6 + ; CHECK: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr killed [[ANDWrr]], killed [[EORWri]] + ; CHECK: $w0 = COPY [[ORRWrr]] + ; CHECK: RET_ReallyLR implicit $w0 + %nota = xor i7 %A, -1 + %c = and i7 %nota, %B + %notc = xor i7 %c, -1 + ret i7 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i117 @demorgan_nand_apint2(i117 %A, i117 %B) { + ; CHECK-LABEL: name: demorgan_nand_apint2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $x0, $x1, $x2, $x3 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x3 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x2 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[COPY3:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr [[COPY3]], [[COPY1]] + ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr killed [[ANDXrr]], [[COPY1]] + ; CHECK: [[ANDXrr1:%[0-9]+]]:gpr64 = ANDXrr [[COPY2]], [[COPY]] + ; CHECK: [[EORXri:%[0-9]+]]:gpr64common = EORXri [[COPY]], 4148 + ; CHECK: [[ORRXrr:%[0-9]+]]:gpr64 = ORRXrr killed [[ANDXrr1]], killed [[EORXri]] + ; CHECK: $x0 = COPY [[ORNXrr]] + ; CHECK: $x1 = COPY [[ORRXrr]] + ; CHECK: RET_ReallyLR implicit $x0, implicit $x1 + %nota = xor i117 %A, -1 + %c = and i117 %nota, %B + %notc = xor i117 %c, -1 + ret i117 %notc +} + +; ~(~A | B) --> (A & ~B) + +define i8 @demorgan_nor(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr [[COPY]], [[COPY1]] + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[ORNWrr]] + ; CHECK: $w0 = COPY [[ORNWrr1]] + ; CHECK: RET_ReallyLR implicit $w0 + %notx = xor i8 %A, -1 + %c = or i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? 
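+; (Illustrative sketch, not from the autogenerated checks: with no extra
+; uses, ~(~A | B) would ideally reduce to the two-instruction form
+;   %notb = xor i8 %B, -1
+;   %r = and i8 %A, %notb
+; i.e. a single and-with-complement (BIC). The use2* variants below probe
+; how an extra use of each intermediate value changes the generated code.)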
+ +define i8 @demorgan_nor_use2a(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2a + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY1]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 23 + ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr killed [[ORNWrr]], killed [[MOVi32imm]], $wzr + ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri killed [[MADDWrrr]], 0, 7 + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr [[COPY]], [[COPY1]] + ; CHECK: [[ORNWrr2:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[ORNWrr1]] + ; CHECK: [[SBFMWri1:%[0-9]+]]:gpr32 = SBFMWri killed [[ORNWrr2]], 0, 7 + ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri1]], killed [[SBFMWri]] + ; CHECK: $w0 = COPY [[SDIVWr]] + ; CHECK: RET_ReallyLR implicit $w0 + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 23 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2a + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? + +define i8 @demorgan_nor_use2b(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2b + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 23 + ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], killed [[MOVi32imm]], $wzr + ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri killed [[MADDWrrr]], 0, 7 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr [[COPY]], [[COPY1]] + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[ORNWrr]] + ; CHECK: [[SBFMWri1:%[0-9]+]]:gpr32 = SBFMWri killed [[ORNWrr1]], 0, 7 + ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri1]], killed [[SBFMWri]] + ; CHECK: $w0 = COPY [[SDIVWr]] + ; CHECK: RET_ReallyLR implicit $w0 + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2b + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? + +define i8 @demorgan_nor_use2c(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2c + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr [[COPY]], [[COPY1]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 23 + ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[ORNWrr]], killed [[MOVi32imm]], $wzr + ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri killed [[MADDWrrr]], 0, 7 + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[ORNWrr]] + ; CHECK: [[SBFMWri1:%[0-9]+]]:gpr32 = SBFMWri killed [[ORNWrr1]], 0, 7 + ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri1]], killed [[SBFMWri]] + ; CHECK: $w0 = COPY [[SDIVWr]] + ; CHECK: RET_ReallyLR implicit $w0 + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2c + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? 
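+; (Observation: in use2ab below, both extra products %use2a and %use2b feed
+; the sdivs, so none of the intermediate 'not' values can be dropped and the
+; checks keep the whole ORN-based chain alive.)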
+ +define i8 @demorgan_nor_use2ab(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2ab + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 23 + ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], killed [[MOVi32imm]], $wzr + ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri killed [[MADDWrrr]], 0, 7 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY1]] + ; CHECK: [[ADDWrs:%[0-9]+]]:gpr32 = ADDWrs [[ORNWrr]], [[ORNWrr]], 4 + ; CHECK: [[SBFMWri1:%[0-9]+]]:gpr32 = SBFMWri killed [[ADDWrs]], 0, 7 + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr [[COPY]], [[COPY1]] + ; CHECK: [[ORNWrr2:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[ORNWrr1]] + ; CHECK: [[SBFMWri2:%[0-9]+]]:gpr32 = SBFMWri killed [[ORNWrr2]], 0, 7 + ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri2]], killed [[SBFMWri]] + ; CHECK: [[SBFMWri3:%[0-9]+]]:gpr32 = SBFMWri killed [[SDIVWr]], 0, 7 + ; CHECK: [[SDIVWr1:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri3]], killed [[SBFMWri1]] + ; CHECK: $w0 = COPY [[SDIVWr1]] + ; CHECK: RET_ReallyLR implicit $w0 + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 17 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2b + %r2 = sdiv i8 %r1, %use2a + ret i8 %r2 +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? + +define i8 @demorgan_nor_use2ac(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2ac + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY1]] + ; CHECK: [[ADDWrs:%[0-9]+]]:gpr32 = ADDWrs [[ORNWrr]], [[ORNWrr]], 4 + ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri killed [[ADDWrs]], 0, 7 + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr [[COPY]], [[COPY1]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 23 + ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[ORNWrr1]], killed [[MOVi32imm]], $wzr + ; CHECK: [[SBFMWri1:%[0-9]+]]:gpr32 = SBFMWri killed [[MADDWrrr]], 0, 7 + ; CHECK: [[ORNWrr2:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[ORNWrr1]] + ; CHECK: [[SBFMWri2:%[0-9]+]]:gpr32 = SBFMWri killed [[ORNWrr2]], 0, 7 + ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri2]], killed [[SBFMWri1]] + ; CHECK: [[SBFMWri3:%[0-9]+]]:gpr32 = SBFMWri killed [[SDIVWr]], 0, 7 + ; CHECK: [[SDIVWr1:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri3]], killed [[SBFMWri]] + ; CHECK: $w0 = COPY [[SDIVWr1]] + ; CHECK: RET_ReallyLR implicit $w0 + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 17 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2c + %r2 = sdiv i8 %r1, %use2a + ret i8 %r2 +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? 
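+; (Same shape as use2ab/use2ac above, but here the reused pair is %use2b and
+; %use2c; in the checks this mainly changes which values feed the two MADD
+; multiplies ahead of the divisions.)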
+ +define i8 @demorgan_nor_use2bc(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2bc + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 23 + ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], [[MOVi32imm]], $wzr + ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri killed [[MADDWrrr]], 0, 7 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr [[COPY]], [[COPY1]] + ; CHECK: [[MADDWrrr1:%[0-9]+]]:gpr32 = MADDWrrr [[ORNWrr]], [[MOVi32imm]], $wzr + ; CHECK: [[SBFMWri1:%[0-9]+]]:gpr32 = SBFMWri killed [[MADDWrrr1]], 0, 7 + ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[ORNWrr]] + ; CHECK: [[SBFMWri2:%[0-9]+]]:gpr32 = SBFMWri killed [[ORNWrr1]], 0, 7 + ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri2]], killed [[SBFMWri1]] + ; CHECK: [[SBFMWri3:%[0-9]+]]:gpr32 = SBFMWri killed [[SDIVWr]], 0, 7 + ; CHECK: [[SDIVWr1:%[0-9]+]]:gpr32 = SDIVWr killed [[SBFMWri3]], killed [[SBFMWri]] + ; CHECK: $w0 = COPY [[SDIVWr1]] + ; CHECK: RET_ReallyLR implicit $w0 + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2c + %r2 = sdiv i8 %r1, %use2b + ret i8 %r2 +} + +; Do not apply DeMorgan's Law to constants. We prefer 'not' ops. + +define i32 @demorganize_constant1(i32 %a) { + ; CHECK-LABEL: name: demorganize_constant1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]] + ; CHECK: [[ORRWri:%[0-9]+]]:gpr32sp = ORRWri killed [[ORNWrr]], 1819 + ; CHECK: $w0 = COPY [[ORRWri]] + ; CHECK: RET_ReallyLR implicit $w0 + %and = and i32 %a, 15 + %and1 = xor i32 %and, -1 + ret i32 %and1 +} + +; Do not apply DeMorgan's Law to constants. We prefer 'not' ops. + +define i32 @demorganize_constant2(i32 %a) { + ; CHECK-LABEL: name: demorganize_constant2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $w0 + ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]] + ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri killed [[ORNWrr]], 1819 + ; CHECK: $w0 = COPY [[ANDWri]] + ; CHECK: RET_ReallyLR implicit $w0 + %and = or i32 %a, 15 + %and1 = xor i32 %and, -1 + ret i32 %and1 +} + +; PR22723: Recognize DeMorgan's Laws when obfuscated by zexts. 
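+; (Illustrative reduction, not autogenerated: for a zext'ed i1 %X,
+; 'xor %zextX, 1' is the logical not of %X, so the or/and-of-nots patterns
+; below are DeMorgan's laws in the i1 domain, e.g.
+;   %or = or i32 %notX, %notY   ; == zext(not(and i1 %X, %Y))
+; which the checks lower to ORN/BIC operations plus a final mask of bit 0.)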
+
+define i32 @demorgan_or_zext(i1 %X, i1 %Y) {
+  ; CHECK-LABEL: name: demorgan_or_zext
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY1]]
+  ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr killed [[ORNWrr]], [[COPY]]
+  ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri killed [[ORNWrr1]], 0
+  ; CHECK: $w0 = COPY [[ANDWri]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %zextX = zext i1 %X to i32
+  %zextY = zext i1 %Y to i32
+  %notX = xor i32 %zextX, 1
+  %notY = xor i32 %zextY, 1
+  %or = or i32 %notX, %notY
+  ret i32 %or
+}
+
+define i32 @demorgan_and_zext(i1 %X, i1 %Y) {
+  ; CHECK-LABEL: name: demorgan_and_zext
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY1]]
+  ; CHECK: [[BICWrr:%[0-9]+]]:gpr32 = BICWrr killed [[ORNWrr]], [[COPY]]
+  ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri killed [[BICWrr]], 0
+  ; CHECK: $w0 = COPY [[ANDWri]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %zextX = zext i1 %X to i32
+  %zextY = zext i1 %Y to i32
+  %notX = xor i32 %zextX, 1
+  %notY = xor i32 %zextY, 1
+  %and = and i32 %notX, %notY
+  ret i32 %and
+}
+
+define <2 x i32> @demorgan_or_zext_vec(<2 x i1> %X, <2 x i1> %Y) {
+  ; CHECK-LABEL: name: demorgan_or_zext_vec
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $d0, $d1
+  ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+  ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+  ; CHECK: [[NOTv8i8_:%[0-9]+]]:fpr64 = NOTv8i8 [[COPY1]]
+  ; CHECK: [[ORNv8i8_:%[0-9]+]]:fpr64 = ORNv8i8 killed [[NOTv8i8_]], [[COPY]]
+  ; CHECK: [[MOVIv2i32_:%[0-9]+]]:fpr64 = MOVIv2i32 1, 0
+  ; CHECK: [[ANDv8i8_:%[0-9]+]]:fpr64 = ANDv8i8 killed [[ORNv8i8_]], killed [[MOVIv2i32_]]
+  ; CHECK: $d0 = COPY [[ANDv8i8_]]
+  ; CHECK: RET_ReallyLR implicit $d0
+  %zextX = zext <2 x i1> %X to <2 x i32>
+  %zextY = zext <2 x i1> %Y to <2 x i32>
+  %notX = xor <2 x i32> %zextX, <i32 1, i32 1>
+  %notY = xor <2 x i32> %zextY, <i32 1, i32 1>
+  %or = or <2 x i32> %notX, %notY
+  ret <2 x i32> %or
+}
+
+define <2 x i32> @demorgan_and_zext_vec(<2 x i1> %X, <2 x i1> %Y) {
+  ; CHECK-LABEL: name: demorgan_and_zext_vec
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $d0, $d1
+  ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+  ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+  ; CHECK: [[NOTv8i8_:%[0-9]+]]:fpr64 = NOTv8i8 [[COPY1]]
+  ; CHECK: [[BICv8i8_:%[0-9]+]]:fpr64 = BICv8i8 killed [[NOTv8i8_]], [[COPY]]
+  ; CHECK: [[MOVIv2i32_:%[0-9]+]]:fpr64 = MOVIv2i32 1, 0
+  ; CHECK: [[ANDv8i8_:%[0-9]+]]:fpr64 = ANDv8i8 killed [[BICv8i8_]], killed [[MOVIv2i32_]]
+  ; CHECK: $d0 = COPY [[ANDv8i8_]]
+  ; CHECK: RET_ReallyLR implicit $d0
+  %zextX = zext <2 x i1> %X to <2 x i32>
+  %zextY = zext <2 x i1> %Y to <2 x i32>
+  %notX = xor <2 x i32> %zextX, <i32 1, i32 1>
+  %notY = xor <2 x i32> %zextY, <i32 1, i32 1>
+  %and = and <2 x i32> %notX, %notY
+  ret <2 x i32> %and
+}
+
+define i32 @PR28476(i32 %x, i32 %y) {
+  ; CHECK-LABEL: name: PR28476
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32common = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 0, 0, implicit-def $nzcv
+  ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+  ; CHECK: [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 0, 0, implicit-def $nzcv
+  ; CHECK: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+  ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr killed [[CSINCWr]], killed [[CSINCWr1]]
+  ; CHECK: [[EORWri:%[0-9]+]]:gpr32sp = EORWri killed [[ANDWrr]], 0
+  ; CHECK: $w0 = COPY [[EORWri]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %cmp0 = icmp ne i32 %x, 0
+  %cmp1 = icmp ne i32 %y, 0
+  %and = and i1 %cmp0, %cmp1
+  %zext = zext i1 %and to i32
+  %cond = xor i32 %zext, 1
+  ret i32 %cond
+}
+
+; ~(~(a | b) | (a & b)) --> (a | b) & ~(a & b) -> a ^ b
+
+define i32 @demorgan_plus_and_to_xor(i32 %a, i32 %b) {
+  ; CHECK-LABEL: name: demorgan_plus_and_to_xor
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $w0, $w1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[COPY]], [[COPY1]]
+  ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY]], [[COPY1]]
+  ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr killed [[ANDWrr]], killed [[ORRWrr]]
+  ; CHECK: [[ORNWrr1:%[0-9]+]]:gpr32 = ORNWrr $wzr, killed [[ORNWrr]]
+  ; CHECK: $w0 = COPY [[ORNWrr1]]
+  ; CHECK: RET_ReallyLR implicit $w0
+  %or = or i32 %b, %a
+  %notor = xor i32 %or, -1
+  %and = and i32 %b, %a
+  %or2 = or i32 %and, %notor
+  %not = xor i32 %or2, -1
+  ret i32 %not
+}
+
+define <4 x i32> @demorgan_plus_and_to_xor_vec(<4 x i32> %a, <4 x i32> %b) {
+  ; CHECK-LABEL: name: demorgan_plus_and_to_xor_vec
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $q0, $q1
+  ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+  ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+  ; CHECK: [[ORRv16i8_:%[0-9]+]]:fpr128 = ORRv16i8 [[COPY1]], [[COPY]]
+  ; CHECK: [[ANDv16i8_:%[0-9]+]]:fpr128 = ANDv16i8 [[COPY1]], [[COPY]]
+  ; CHECK: [[ORNv16i8_:%[0-9]+]]:fpr128 = ORNv16i8 killed [[ANDv16i8_]], killed [[ORRv16i8_]]
+  ; CHECK: [[NOTv16i8_:%[0-9]+]]:fpr128 = NOTv16i8 killed [[ORNv16i8_]]
+  ; CHECK: $q0 = COPY [[NOTv16i8_]]
+  ; CHECK: RET_ReallyLR implicit $q0
+  %or = or <4 x i32> %a, %b
+  %notor = xor <4 x i32> %or, < i32 -1, i32 -1, i32 -1, i32 -1 >
+  %and = and <4 x i32> %a, %b
+  %or2 = or <4 x i32> %and, %notor
+  %not = xor <4 x i32> %or2, < i32 -1, i32 -1, i32 -1, i32 -1 >
+  ret <4 x i32> %not
+}
Index: test/CodeGen/X86/demorgan-extra.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/demorgan-extra.ll
@@ -0,0 +1,254 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -stop-after=machine-combiner -o - %s | FileCheck %s
+
+; There is an identical twin test in test/Transforms/InstCombine/demorgan-extra.ll
+; Please keep them in sync! :)
+
+declare void @use32(i32)
+declare i32 @gen32()
+
+; ============================================================================ ;
+
+; ~(~A & B) --> (A | ~B)
+
+define i32 @demorgan_nand(i32 %A, i32 %B) {
+  ; CHECK-LABEL: name: demorgan_nand
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $edi, $esi
+  ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+  ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+  ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]]
+  ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], [[COPY]], implicit-def dead $eflags
+  ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[AND32rr]]
+  ; CHECK: $eax = COPY [[NOT32r1]]
+  ; CHECK: RET 0, $eax
+  %notx = xor i32 %A, -1
+  %c = and i32 %notx, %B
+  %notc = xor i32 %c, -1
+  ret i32 %notc
+}
+
+define <2 x i32> @demorgan_nand_vec(<2 x i32> %A, <2 x i32> %B) {
+  ; CHECK-LABEL: name: demorgan_nand_vec
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $xmm0, $xmm1
+  ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+  ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+  ; CHECK: [[MOVAPSrm:%[0-9]+]]:vr128 = MOVAPSrm $rip, 1, $noreg, %const.0, $noreg :: (load 16 from constant-pool)
+  ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[COPY1]], [[MOVAPSrm]]
+  ; CHECK: [[PANDrr:%[0-9]+]]:vr128 = PANDrr [[PXORrr]], [[COPY]]
+  ; CHECK: [[PXORrr1:%[0-9]+]]:vr128 = PXORrr [[PANDrr]], [[MOVAPSrm]]
+  ; CHECK: $xmm0 = COPY [[PXORrr1]]
+  ; CHECK: RET 0, $xmm0
+  %notx = xor <2 x i32> %A, <i32 -1, i32 -1>
+  %c = and <2 x i32> %notx, %B
+  %notc = xor <2 x i32> %c, <i32 -1, i32 -1>
+  ret <2 x i32> %notc
+}
+
+define <3 x i32> @demorgan_nand_vec_undef(<3 x i32> %A, <3 x i32> %B) {
+  ; CHECK-LABEL: name: demorgan_nand_vec_undef
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $xmm0, $xmm1
+  ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+  ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+  ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES
+  ; CHECK: [[PANDNrr:%[0-9]+]]:vr128 = PANDNrr [[COPY1]], [[COPY]]
+  ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[PANDNrr]], killed [[V_SETALLONES]]
+  ; CHECK: $xmm0 = COPY [[PXORrr]]
+  ; CHECK: RET 0, $xmm0
+  %notx = xor <3 x i32> %A, <i32 -1, i32 undef, i32 -1>
+  %c = and <3 x i32> %notx, %B
+  %notc = xor <3 x i32> %c, <i32 -1, i32 undef, i32 -1>
+  ret <3 x i32> %notc
+}
+
+define <4 x i32> @demorgan_nand_vec_128bit(<4 x i32> %A, <4 x i32> %B) {
+  ; CHECK-LABEL: name: demorgan_nand_vec_128bit
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $xmm0, $xmm1
+  ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+  ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+  ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES
+  ; CHECK: [[PANDNrr:%[0-9]+]]:vr128 = PANDNrr [[COPY1]], [[COPY]]
+  ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[PANDNrr]], killed [[V_SETALLONES]]
+  ; CHECK: $xmm0 = COPY [[PXORrr]]
+  ; CHECK: RET 0, $xmm0
+  %notx = xor <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %c = and <4 x i32> %notx, %B
+  %notc = xor <4 x i32> %c, <i32 -1, i32 -1, i32 -1, i32 -1>
+  ret <4 x i32> %notc
+}
+define <4 x i32> @demorgan_nand_vec_128bit_undef(<4 x i32> %A, <4 x i32> %B) {
+  ; CHECK-LABEL: name: demorgan_nand_vec_128bit_undef
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $xmm0, $xmm1
+  ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+  ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+  ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES
+  ; CHECK: [[PANDNrr:%[0-9]+]]:vr128 = PANDNrr [[COPY1]], [[COPY]]
+  ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[PANDNrr]], killed [[V_SETALLONES]]
+  ; CHECK: $xmm0 = COPY [[PXORrr]]
+  ; CHECK: RET 0, $xmm0
+  %notx = xor <4 x i32> %A, <i32 -1, i32 undef, i32 -1, i32 -1>
+  %c = and <4 x i32> %notx, %B
+  %notc = xor <4 x i32> %c, <i32 -1, i32 undef, i32 -1, i32 -1>
+  ret <4 x i32> %notc
+}
+
+define i32 @demorgan_nand_commutative(i32
%A) { + ; CHECK-LABEL: name: demorgan_nand_commutative + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen32, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $eax + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $eax + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY1]], killed [[NOT32r]], implicit-def dead $eflags + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[AND32rr]] + ; CHECK: $eax = COPY [[NOT32r1]] + ; CHECK: RET 0, $eax + %B = call i32 @gen32() + %notx = xor i32 %A, -1 + %c = and i32 %B, %notx ; swapped + %notc = xor i32 %c, -1 + ret i32 %notc +} + +define i32 @demorgan_nand_extraxor(i32 %A) { + ; CHECK-LABEL: name: demorgan_nand_extraxor + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen32, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $eax + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $eax + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen32, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $eax + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY2:%[0-9]+]]:gr32 = COPY $eax + ; CHECK: [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[COPY1]], [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], killed [[XOR32rr]], implicit-def dead $eflags + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[AND32rr]] + ; CHECK: $eax = COPY [[NOT32r1]] + ; CHECK: RET 0, $eax + %V = call i32 @gen32() + %Z = call i32 @gen32() + %B = xor i32 %V, %Z ; not with -1 + %notx = xor i32 %A, -1 + %c = and i32 %notx, %B + %notc = xor i32 %c, -1 + ret i32 %notc +} + +define i32 @demorgan_nand_extraxor_commutative(i32 %A) { + ; CHECK-LABEL: name: demorgan_nand_extraxor_commutative + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen32, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $eax + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $eax + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: CALL64pcrel32 @gen32, csr_64, implicit $rsp, implicit $ssp, 
implicit-def $rsp, implicit-def $ssp, implicit-def $eax + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: [[COPY2:%[0-9]+]]:gr32 = COPY $eax + ; CHECK: [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[COPY1]], [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[XOR32rr]], killed [[NOT32r]], implicit-def dead $eflags + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[AND32rr]] + ; CHECK: $eax = COPY [[NOT32r1]] + ; CHECK: RET 0, $eax + %V = call i32 @gen32() + %Z = call i32 @gen32() + %B = xor i32 %V, %Z ; not with -1 + %notx = xor i32 %A, -1 + %c = and i32 %B, %notx ; swapped + %notc = xor i32 %c, -1 + ret i32 %notc +} + +define i32 @demorgan_nand_badxor0(i32 %A, i32 %B) { + ; CHECK-LABEL: name: demorgan_nand_badxor0 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[XOR32ri8_:%[0-9]+]]:gr32 = XOR32ri8 [[COPY1]], 1, implicit-def dead $eflags + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[XOR32ri8_]], [[COPY]], implicit-def dead $eflags + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[AND32rr]] + ; CHECK: $eax = COPY [[NOT32r]] + ; CHECK: RET 0, $eax + %notx = xor i32 %A, 1 ; not -1 + %c = and i32 %notx, %B + %notc = xor i32 %c, -1 + ret i32 %notc +} + +define i32 @demorgan_nand_badxor1(i32 %A, i32 %B) { + ; CHECK-LABEL: name: demorgan_nand_badxor1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], [[COPY]], implicit-def dead $eflags + ; CHECK: [[XOR32ri8_:%[0-9]+]]:gr32 = XOR32ri8 [[AND32rr]], 1, implicit-def dead $eflags + ; CHECK: $eax = COPY [[XOR32ri8_]] + ; CHECK: RET 0, $eax + %notx = xor i32 %A, -1 + %c = and i32 %notx, %B + %notc = xor i32 %c, 1 ; not -1 + ret i32 %notc +} + +define i32 @demorgan_nand_oneuse0(i32 %A, i32 %B) { + ; CHECK-LABEL: name: demorgan_nand_oneuse0 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], [[COPY]], implicit-def dead $eflags + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[AND32rr]] + ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: $edi = COPY [[NOT32r]] + ; CHECK: CALL64pcrel32 @use32, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $rsp, implicit-def $ssp + ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp + ; CHECK: $eax = COPY [[NOT32r1]] + ; CHECK: RET 0, $eax + %notx = xor i32 %A, -1 + %c = and i32 %notx, %B + %notc = xor i32 %c, -1 + call void @use32(i32 %notx) + ret i32 %notc +} + +define i32 @demorgan_nand_oneuse1(i32 %A, i32 %B) { + ; CHECK-LABEL: name: demorgan_nand_oneuse1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], 
[[COPY]], implicit-def dead $eflags
+  ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[AND32rr]]
+  ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+  ; CHECK: $edi = COPY [[AND32rr]]
+  ; CHECK: CALL64pcrel32 @use32, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $rsp, implicit-def $ssp
+  ; CHECK: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+  ; CHECK: $eax = COPY [[NOT32r1]]
+  ; CHECK: RET 0, $eax
+  %notx = xor i32 %A, -1
+  %c = and i32 %notx, %B
+  %notc = xor i32 %c, -1
+  call void @use32(i32 %c)
+  ret i32 %notc
+}
+
+; ============================================================================ ;
Index: test/CodeGen/X86/demorgan.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/demorgan.ll
@@ -0,0 +1,858 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -stop-after=machine-combiner -o - %s | FileCheck %s
+
+; There is an identical twin test in test/Transforms/InstCombine/demorgan.ll
+; Please keep them in sync! :)
+
+; (~A | ~B) == ~(A & B)
+
+define i43 @demorgan_or_apint1(i43 %A, i43 %B) {
+  ; CHECK-LABEL: name: demorgan_or_apint1
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $rdi, $rsi
+  ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
+  ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+  ; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 8796093022207
+  ; CHECK: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY1]], [[MOV64ri]], implicit-def dead $eflags
+  ; CHECK: [[XOR64rr1:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[MOV64ri]], implicit-def dead $eflags
+  ; CHECK: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[XOR64rr]], killed [[XOR64rr1]], implicit-def dead $eflags
+  ; CHECK: $rax = COPY [[OR64rr]]
+  ; CHECK: RET 0, $rax
+  %NotA = xor i43 %A, -1
+  %NotB = xor i43 %B, -1
+  %C = or i43 %NotA, %NotB
+  ret i43 %C
+}
+
+; (~A | ~B) == ~(A & B)
+
+define i129 @demorgan_or_apint2(i129 %A, i129 %B) {
+  ; CHECK-LABEL: name: demorgan_or_apint2
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx, $r8, $r9
+  ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $r9
+  ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $r8
+  ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rcx
+  ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdx
+  ; CHECK: [[COPY4:%[0-9]+]]:gr64 = COPY $rsi
+  ; CHECK: [[COPY5:%[0-9]+]]:gr64 = COPY $rdi
+  ; CHECK: [[COPY6:%[0-9]+]]:gr32 = COPY [[COPY3]].sub_32bit
+  ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY6]]
+  ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY4]]
+  ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY5]]
+  ; CHECK: [[COPY7:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit
+  ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY7]]
+  ; CHECK: [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[COPY1]]
+  ; CHECK: [[NOT64r3:%[0-9]+]]:gr64 = NOT64r [[COPY2]]
+  ; CHECK: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[NOT64r1]], killed [[NOT64r3]], implicit-def dead $eflags
+  ; CHECK: [[OR64rr1:%[0-9]+]]:gr64 = OR64rr [[NOT64r]], killed [[NOT64r2]], implicit-def dead $eflags
+  ; CHECK: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags
+  ; CHECK: [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF
+  ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr64 = INSERT_SUBREG [[DEF]], killed [[OR32rr]], %subreg.sub_32bit
+  ; CHECK: [[COPY8:%[0-9]+]]:gr32 = COPY [[INSERT_SUBREG]].sub_32bit
+  ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[COPY8]], 1,
implicit-def dead $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[AND32ri8_]], %subreg.sub_32bit + ; CHECK: $rax = COPY [[OR64rr]] + ; CHECK: $rdx = COPY [[OR64rr1]] + ; CHECK: $rcx = COPY [[SUBREG_TO_REG]] + ; CHECK: RET 0, $rax, $rdx, $rcx + %NotA = xor i129 %A, -1 + %NotB = xor i129 %B, -1 + %C = or i129 %NotA, %NotB + ret i129 %C +} + +; (~A & ~B) == ~(A | B) + +define i477 @demorgan_and_apint1(i477 %A, i477 %B) { + ; CHECK-LABEL: name: demorgan_and_apint1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx, $r8, $r9 + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $r9 + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $r8 + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY4:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY5:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[COPY6:%[0-9]+]]:gr64 = COPY [[COPY5]] + ; CHECK: [[COPY7:%[0-9]+]]:gr64 = COPY [[COPY5]] + ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.7, 1, $noreg, 56, $noreg :: (load 4 from %fixed-stack.7 + 56, align 8) + ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 0, $noreg :: (load 8 from %fixed-stack.7) + ; CHECK: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 8, $noreg :: (load 8 from %fixed-stack.7 + 8) + ; CHECK: [[MOV64rm2:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 16, $noreg :: (load 8 from %fixed-stack.7 + 16) + ; CHECK: [[MOV64rm3:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 24, $noreg :: (load 8 from %fixed-stack.7 + 24) + ; CHECK: [[MOV64rm4:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 32, $noreg :: (load 8 from %fixed-stack.7 + 32) + ; CHECK: [[MOV64rm5:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 40, $noreg :: (load 8 from %fixed-stack.7 + 40) + ; CHECK: [[MOV64rm6:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.7, 1, $noreg, 48, $noreg :: (load 8 from %fixed-stack.7 + 48) + ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.9, 1, $noreg, 8, $noreg :: (load 4 from %fixed-stack.9 + 8, align 8) + ; CHECK: [[MOV64rm7:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.10, 1, $noreg, 0, $noreg :: (load 8 from %fixed-stack.10, align 16) + ; CHECK: [[MOV64rm8:%[0-9]+]]:gr64 = MOV64rm %fixed-stack.9, 1, $noreg, 0, $noreg :: (load 8 from %fixed-stack.9) + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[MOV64rm8]] + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[MOV64rm7]] + ; CHECK: [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[COPY]] + ; CHECK: [[NOT64r3:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[NOT64r4:%[0-9]+]]:gr64 = NOT64r [[COPY2]] + ; CHECK: [[NOT64r5:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[NOT64r6:%[0-9]+]]:gr64 = NOT64r [[COPY4]] + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[MOV32rm1]] + ; CHECK: [[NOT64r7:%[0-9]+]]:gr64 = NOT64r [[MOV64rm6]] + ; CHECK: [[NOT64r8:%[0-9]+]]:gr64 = NOT64r [[MOV64rm5]] + ; CHECK: [[NOT64r9:%[0-9]+]]:gr64 = NOT64r [[MOV64rm4]] + ; CHECK: [[NOT64r10:%[0-9]+]]:gr64 = NOT64r [[MOV64rm3]] + ; CHECK: [[NOT64r11:%[0-9]+]]:gr64 = NOT64r [[MOV64rm2]] + ; CHECK: [[NOT64r12:%[0-9]+]]:gr64 = NOT64r [[MOV64rm1]] + ; CHECK: [[NOT64r13:%[0-9]+]]:gr64 = NOT64r [[MOV64rm]] + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[MOV32rm]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r6]], killed [[NOT64r13]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[NOT64r5]], killed [[NOT64r12]], implicit-def dead $eflags + ; CHECK: 
[[AND64rr2:%[0-9]+]]:gr64 = AND64rr [[NOT64r4]], killed [[NOT64r11]], implicit-def dead $eflags + ; CHECK: [[AND64rr3:%[0-9]+]]:gr64 = AND64rr [[NOT64r3]], killed [[NOT64r10]], implicit-def dead $eflags + ; CHECK: [[AND64rr4:%[0-9]+]]:gr64 = AND64rr [[NOT64r2]], killed [[NOT64r9]], implicit-def dead $eflags + ; CHECK: [[AND64rr5:%[0-9]+]]:gr64 = AND64rr [[NOT64r1]], killed [[NOT64r8]], implicit-def dead $eflags + ; CHECK: [[AND64rr6:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], killed [[NOT64r7]], implicit-def dead $eflags + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 48, $noreg, killed [[AND64rr6]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 40, $noreg, killed [[AND64rr5]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 32, $noreg, killed [[AND64rr4]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 24, $noreg, killed [[AND64rr3]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 16, $noreg, killed [[AND64rr2]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 8, $noreg, killed [[AND64rr1]] :: (store 8) + ; CHECK: MOV64mr [[COPY7]], 1, $noreg, 0, $noreg, killed [[AND64rr]] :: (store 8) + ; CHECK: [[AND32ri:%[0-9]+]]:gr32 = AND32ri [[AND32rr]], 536870911, implicit-def dead $eflags + ; CHECK: MOV32mr [[COPY7]], 1, $noreg, 56, $noreg, killed [[AND32ri]] :: (store 4, align 8) + ; CHECK: $rax = COPY [[COPY6]] + ; CHECK: RET 0, $rax + %NotA = xor i477 %A, -1 + %NotB = xor i477 %B, -1 + %C = and i477 %NotA, %NotB + ret i477 %C +} + +; (~A & ~B) == ~(A | B) + +define i129 @demorgan_and_apint2(i129 %A, i129 %B) { + ; CHECK-LABEL: name: demorgan_and_apint2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx, $r8, $r9 + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $r9 + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $r8 + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY4:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY5:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[COPY6:%[0-9]+]]:gr32 = COPY [[COPY3]].sub_32bit + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY6]] + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY4]] + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY5]] + ; CHECK: [[COPY7:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY7]] + ; CHECK: [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[NOT64r3:%[0-9]+]]:gr64 = NOT64r [[COPY2]] + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r1]], killed [[NOT64r3]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], killed [[NOT64r2]], implicit-def dead $eflags + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr64 = INSERT_SUBREG [[DEF]], killed [[AND32rr]], %subreg.sub_32bit + ; CHECK: [[COPY8:%[0-9]+]]:gr32 = COPY [[INSERT_SUBREG]].sub_32bit + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[COPY8]], 1, implicit-def dead $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[AND32ri8_]], %subreg.sub_32bit + ; CHECK: $rax = COPY [[AND64rr]] + ; CHECK: $rdx = COPY [[AND64rr1]] + ; CHECK: $rcx = COPY [[SUBREG_TO_REG]] + ; CHECK: RET 0, $rax, $rdx, $rcx + %NotA = xor i129 %A, -1 + %NotB = xor i129 %B, -1 + %C = and i129 %NotA, %NotB + ret i129 %C +} + +; (~A & ~B) == ~(A | B) + +define i65 @demorgan_and_apint3(i65 %A, i65 %B) { + ; CHECK-LABEL: name: demorgan_and_apint3 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, 
$rdx, $rcx + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[XOR64ri8_:%[0-9]+]]:gr64 = XOR64ri8 [[COPY2]], 1, implicit-def dead $eflags + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[XOR64ri8_1:%[0-9]+]]:gr64 = XOR64ri8 [[COPY]], 1, implicit-def dead $eflags + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], killed [[NOT64r1]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[XOR64ri8_]], killed [[XOR64ri8_1]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[AND64rr]] + ; CHECK: $rdx = COPY [[AND64rr1]] + ; CHECK: RET 0, $rax, $rdx + %NotA = xor i65 %A, -1 + %NotB = xor i65 -1, %B + %C = and i65 %NotA, %NotB + ret i65 %C +} + +; (~A & ~B) == ~(A | B) + +define i66 @demorgan_and_apint4(i66 %A, i66 %B) { + ; CHECK-LABEL: name: demorgan_and_apint4 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[XOR64ri8_:%[0-9]+]]:gr64 = XOR64ri8 [[COPY2]], 3, implicit-def dead $eflags + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[XOR64ri8_1:%[0-9]+]]:gr64 = XOR64ri8 [[COPY]], 3, implicit-def dead $eflags + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[COPY1]] + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], killed [[NOT64r1]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[XOR64ri8_]], killed [[XOR64ri8_1]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[AND64rr]] + ; CHECK: $rdx = COPY [[AND64rr1]] + ; CHECK: RET 0, $rax, $rdx + %NotA = xor i66 %A, -1 + %NotB = xor i66 %B, -1 + %C = and i66 %NotA, %NotB + ret i66 %C +} + +; (~A & ~B) == ~(A | B) + +define i47 @demorgan_and_apint5(i47 %A, i47 %B) { + ; CHECK-LABEL: name: demorgan_and_apint5 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 140737488355327 + ; CHECK: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY1]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[XOR64rr1:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[XOR64rr]], killed [[XOR64rr1]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[AND64rr]] + ; CHECK: RET 0, $rax + %NotA = xor i47 %A, -1 + %NotB = xor i47 %B, -1 + %C = and i47 %NotA, %NotB + ret i47 %C +} + +; This is confirming that 2 transforms work together: +; ~(~A & ~B) --> A | B + +define i32 @test3(i32 %A, i32 %B) { + ; CHECK-LABEL: name: test3 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[NOT32r2:%[0-9]+]]:gr32 = NOT32r [[AND32rr]] + ; CHECK: $eax = COPY [[NOT32r2]] + ; CHECK: RET 0, $eax + %nota = xor i32 %A, -1 + %notb = xor i32 %B, -1 + %c = and i32 %nota, %notb + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Invert a constant if needed: +; ~(~A & 5) 
--> A | ~5 + +define i32 @test4(i32 %A) { + ; CHECK-LABEL: name: test4 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[OR32ri8_:%[0-9]+]]:gr32 = OR32ri8 [[COPY]], -6, implicit-def dead $eflags + ; CHECK: $eax = COPY [[OR32ri8_]] + ; CHECK: RET 0, $eax + %nota = xor i32 %A, -1 + %c = and i32 %nota, 5 + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Test the mirror of DeMorgan's law with an extra 'not'. +; ~(~A | ~B) --> A & B + +define i32 @test5(i32 %A, i32 %B) { + ; CHECK-LABEL: name: test5 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[NOT32r]], killed [[NOT32r1]], implicit-def dead $eflags + ; CHECK: [[NOT32r2:%[0-9]+]]:gr32 = NOT32r [[OR32rr]] + ; CHECK: $eax = COPY [[NOT32r2]] + ; CHECK: RET 0, $eax + %nota = xor i32 %A, -1 + %notb = xor i32 %B, -1 + %c = or i32 %nota, %notb + %notc = xor i32 %c, -1 + ret i32 %notc +} + +; Repeat with weird types for extra coverage. +; ~(~A & ~B) --> A | B + +define i47 @test3_apint(i47 %A, i47 %B) { + ; CHECK-LABEL: name: test3_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 140737488355327 + ; CHECK: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY1]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[XOR64rr1:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[XOR64rr]], killed [[XOR64rr1]], implicit-def dead $eflags + ; CHECK: [[XOR64rr2:%[0-9]+]]:gr64 = XOR64rr [[AND64rr]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: $rax = COPY [[XOR64rr2]] + ; CHECK: RET 0, $rax + %nota = xor i47 %A, -1 + %notb = xor i47 %B, -1 + %c = and i47 %nota, %notb + %notc = xor i47 %c, -1 + ret i47 %notc +} + +; ~(~A & 5) --> A | ~5 + +define i61 @test4_apint(i61 %A) { + ; CHECK-LABEL: name: test4_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY1]] + ; CHECK: [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr64 = INSERT_SUBREG [[DEF]], killed [[NOT32r]], %subreg.sub_32bit + ; CHECK: [[COPY2:%[0-9]+]]:gr32 = COPY [[INSERT_SUBREG]].sub_32bit + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[COPY2]], 5, implicit-def dead $eflags + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[AND32ri8_]], %subreg.sub_32bit + ; CHECK: $rax = COPY [[SUBREG_TO_REG]] + ; CHECK: RET 0, $rax + %nota = xor i61 %A, -1 + %c = and i61 %nota, 5 ; 5 = ~c2 + %notc = xor i61 %c, -1 + ret i61 %c +} + +; ~(~A | ~B) --> A & B + +define i71 @test5_apint(i71 %A, i71 %B) { + ; CHECK-LABEL: name: test5_apint + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[XOR64ri8_:%[0-9]+]]:gr64 = XOR64ri8 [[COPY2]], 127, implicit-def dead $eflags + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r 
[[COPY1]] + ; CHECK: [[XOR64ri8_1:%[0-9]+]]:gr64 = XOR64ri8 [[COPY]], 127, implicit-def dead $eflags + ; CHECK: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[XOR64ri8_]], killed [[XOR64ri8_1]], implicit-def dead $eflags + ; CHECK: [[OR64rr1:%[0-9]+]]:gr64 = OR64rr [[NOT64r]], killed [[NOT64r1]], implicit-def dead $eflags + ; CHECK: [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[OR64rr1]] + ; CHECK: [[XOR64ri8_2:%[0-9]+]]:gr64 = XOR64ri8 [[OR64rr]], 127, implicit-def dead $eflags + ; CHECK: $rax = COPY [[NOT64r2]] + ; CHECK: $rdx = COPY [[XOR64ri8_2]] + ; CHECK: RET 0, $rax, $rdx + %nota = xor i71 %A, -1 + %notb = xor i71 %B, -1 + %c = or i71 %nota, %notb + %notc = xor i71 %c, -1 + ret i71 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i8 @demorgan_nand(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nand + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[AND8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, -1 + %c = and i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i7 @demorgan_nand_apint1(i7 %A, i7 %B) { + ; CHECK-LABEL: name: demorgan_nand_apint1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[XOR8ri:%[0-9]+]]:gr8 = XOR8ri [[COPY3]], 127, implicit-def dead $eflags + ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[XOR8ri]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[XOR8ri1:%[0-9]+]]:gr8 = XOR8ri [[AND8rr]], 127, implicit-def dead $eflags + ; CHECK: $al = COPY [[XOR8ri1]] + ; CHECK: RET 0, $al + %nota = xor i7 %A, -1 + %c = and i7 %nota, %B + %notc = xor i7 %c, -1 + ret i7 %notc +} + +; ~(~A & B) --> (A | ~B) + +define i117 @demorgan_nand_apint2(i117 %A, i117 %B) { + ; CHECK-LABEL: name: demorgan_nand_apint2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $rdi, $rsi, $rdx, $rcx + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rcx + ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rdx + ; CHECK: [[COPY2:%[0-9]+]]:gr64 = COPY $rsi + ; CHECK: [[COPY3:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 9007199254740991 + ; CHECK: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY2]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[COPY3]] + ; CHECK: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[NOT64r]], [[COPY1]], implicit-def dead $eflags + ; CHECK: [[AND64rr1:%[0-9]+]]:gr64 = AND64rr [[XOR64rr]], [[COPY]], implicit-def dead $eflags + ; CHECK: [[XOR64rr1:%[0-9]+]]:gr64 = XOR64rr [[AND64rr1]], [[MOV64ri]], implicit-def dead $eflags + ; CHECK: [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[AND64rr]] + ; CHECK: $rax = COPY [[NOT64r1]] + ; CHECK: $rdx = COPY [[XOR64rr1]] + ; CHECK: RET 0, $rax, $rdx + %nota = xor i117 %A, -1 + %c = and i117 %nota, %B + %notc = xor i117 %c, -1 + ret i117 %notc +} + +; ~(~A | B) --> (A & ~B) + +define i8 @demorgan_nor(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: 
[[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: RET 0, $al + %notx = xor i8 %A, -1 + %c = or i8 %notx, %B + %notc = xor i8 %c, -1 + ret i8 %notc +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? + +define i8 @demorgan_nor_use2a(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2a + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[NOT8r]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY5]] + ; CHECK: RET 0, $al + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 23 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2a + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? + +define i8 @demorgan_nor_use2b(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2b + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[COPY3]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY2]] + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], [[COPY3]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY5]] + ; CHECK: RET 0, $al + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2b + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use one of the intermediate results? 
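+; Here the extra use is on the inner 'or' (%c) itself. Rewriting to
+; (A & ~B) could not delete that 'or', so in the InstCombine twin this
+; case is expected to stay unfolded; the MIR below likewise keeps the
+; whole NOT/OR/NOT chain.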
+ +define i8 @demorgan_nor_use2c(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2c + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[OR8rr]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY5]] + ; CHECK: RET 0, $al + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r = sdiv i8 %notc, %use2c + ret i8 %r +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? + +define i8 @demorgan_nor_use2ab(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2ab + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[COPY3]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY2]] + ; CHECK: [[MOV8ri1:%[0-9]+]]:gr8 = MOV8ri 17 + ; CHECK: $al = COPY [[NOT8r]] + ; CHECK: MUL8r killed [[MOV8ri1]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], [[COPY3]], implicit-def dead $eflags + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY6:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY6]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY5]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY7:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY7]] + ; CHECK: RET 0, $al + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 17 + %c = or i8 %nota, %B + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2b + %r2 = sdiv i8 %r1, %use2a + ret i8 %r2 +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? 
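+; Both %nota and the inner 'or' get extra uses in this variant, so no
+; intermediate value can disappear: the checks below keep the NOT/OR/NOT
+; chain together with both multiplies.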
+ +define i8 @demorgan_nor_use2ac(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2ac + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY3]] + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 17 + ; CHECK: $al = COPY [[NOT8r]] + ; CHECK: MUL8r killed [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], killed [[COPY2]], implicit-def dead $eflags + ; CHECK: [[MOV8ri1:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[OR8rr]] + ; CHECK: MUL8r killed [[MOV8ri1]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY5]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY6:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY6]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY7:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY7]] + ; CHECK: RET 0, $al + %nota = xor i8 %A, -1 + %use2a = mul i8 %nota, 17 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2c + %r2 = sdiv i8 %r1, %use2a + ret i8 %r2 +} + +; ~(~A | B) --> (A & ~B) - what if we use two of the intermediate results? 
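+; This variant reuses %B and the inner 'or'. Both extra uses multiply by
+; the same constant, which is why a single MOV8ri 23 feeds both MUL8r
+; instructions in the checks below.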
+ +define i8 @demorgan_nor_use2bc(i8 %A, i8 %B) { + ; CHECK-LABEL: name: demorgan_nor_use2bc + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi, $esi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi + ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; CHECK: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 23 + ; CHECK: $al = COPY [[COPY3]] + ; CHECK: MUL8r [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY4:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r:%[0-9]+]]:gr8 = NOT8r [[COPY2]] + ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[NOT8r]], [[COPY3]], implicit-def dead $eflags + ; CHECK: $al = COPY [[OR8rr]] + ; CHECK: MUL8r [[MOV8ri]], implicit-def $al, implicit-def dead $eflags, implicit-def $ax, implicit $al + ; CHECK: [[COPY5:%[0-9]+]]:gr8 = COPY $al + ; CHECK: [[NOT8r1:%[0-9]+]]:gr8 = NOT8r [[OR8rr]] + ; CHECK: $al = COPY [[NOT8r1]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY5]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY6:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY6]] + ; CHECK: CBW implicit-def $ax, implicit $al + ; CHECK: IDIV8r killed [[COPY4]], implicit-def $al, implicit-def dead $ah, implicit-def dead $eflags, implicit $ax + ; CHECK: [[COPY7:%[0-9]+]]:gr8 = COPY $al + ; CHECK: $al = COPY [[COPY7]] + ; CHECK: RET 0, $al + %use2b = mul i8 %B, 23 + %nota = xor i8 %A, -1 + %c = or i8 %nota, %B + %use2c = mul i8 %c, 23 + %notc = xor i8 %c, -1 + %r1 = sdiv i8 %notc, %use2c + %r2 = sdiv i8 %r1, %use2b + ret i8 %r2 +} + +; Do not apply DeMorgan's Law to constants. We prefer 'not' ops. + +define i32 @demorganize_constant1(i32 %a) { + ; CHECK-LABEL: name: demorganize_constant1 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[OR32ri8_:%[0-9]+]]:gr32 = OR32ri8 [[NOT32r]], -16, implicit-def dead $eflags + ; CHECK: $eax = COPY [[OR32ri8_]] + ; CHECK: RET 0, $eax + %and = and i32 %a, 15 + %and1 = xor i32 %and, -1 + ret i32 %and1 +} + +; Do not apply DeMorgan's Law to constants. We prefer 'not' ops. + +define i32 @demorganize_constant2(i32 %a) { + ; CHECK-LABEL: name: demorganize_constant2 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK: liveins: $edi + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]] + ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[NOT32r]], -16, implicit-def dead $eflags + ; CHECK: $eax = COPY [[AND32ri8_]] + ; CHECK: RET 0, $eax + %and = or i32 %a, 15 + %and1 = xor i32 %and, -1 + ret i32 %and1 +} + +; PR22723: Recognize DeMorgan's Laws when obfuscated by zexts. 
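+; A zext of i1 has every bit above bit 0 known zero, so 'xor ..., 1' acts
+; as a logical 'not' of the original i1 value. A hand-written sketch of
+; the pattern (the real assertions below are autogenerated):
+;   %nx = xor i32 %zx, 1     ; ~X in the low bit
+;   %ny = xor i32 %zy, 1     ; ~Y in the low bit
+;   %or = or i32 %nx, %ny    ; ~X | ~Y == ~(X & Y)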
+
+define i32 @demorgan_or_zext(i1 %X, i1 %Y) {
+ ; CHECK-LABEL: name: demorgan_or_zext
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK: liveins: $edi, $esi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]]
+ ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY1]]
+ ; CHECK: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[NOT32r1]], killed [[NOT32r]], implicit-def dead $eflags
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[OR32rr]], 1, implicit-def dead $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, $eax
+ %zextX = zext i1 %X to i32
+ %zextY = zext i1 %Y to i32
+ %notX = xor i32 %zextX, 1
+ %notY = xor i32 %zextY, 1
+ %or = or i32 %notX, %notY
+ ret i32 %or
+}
+
+define i32 @demorgan_and_zext(i1 %X, i1 %Y) {
+ ; CHECK-LABEL: name: demorgan_and_zext
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK: liveins: $edi, $esi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[COPY]]
+ ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[COPY1]]
+ ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[NOT32r1]], killed [[NOT32r]], implicit-def dead $eflags
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[AND32rr]], 1, implicit-def dead $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, $eax
+ %zextX = zext i1 %X to i32
+ %zextY = zext i1 %Y to i32
+ %notX = xor i32 %zextX, 1
+ %notY = xor i32 %zextY, 1
+ %and = and i32 %notX, %notY
+ ret i32 %and
+}
+
+define <2 x i32> @demorgan_or_zext_vec(<2 x i1> %X, <2 x i1> %Y) {
+ ; CHECK-LABEL: name: demorgan_or_zext_vec
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK: liveins: $xmm0, $xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES
+ ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[COPY1]], [[V_SETALLONES]]
+ ; CHECK: [[PXORrr1:%[0-9]+]]:vr128 = PXORrr [[COPY]], [[V_SETALLONES]]
+ ; CHECK: [[PORrr:%[0-9]+]]:vr128 = PORrr [[PXORrr]], killed [[PXORrr1]]
+ ; CHECK: [[PANDrm:%[0-9]+]]:vr128 = PANDrm [[PORrr]], $rip, 1, $noreg, %const.0, $noreg :: (load 16 from constant-pool)
+ ; CHECK: $xmm0 = COPY [[PANDrm]]
+ ; CHECK: RET 0, $xmm0
+ %zextX = zext <2 x i1> %X to <2 x i32>
+ %zextY = zext <2 x i1> %Y to <2 x i32>
+ %notX = xor <2 x i32> %zextX, <i32 1, i32 1>
+ %notY = xor <2 x i32> %zextY, <i32 1, i32 1>
+ %or = or <2 x i32> %notX, %notY
+ ret <2 x i32> %or
+}
+
+define <2 x i32> @demorgan_and_zext_vec(<2 x i1> %X, <2 x i1> %Y) {
+ ; CHECK-LABEL: name: demorgan_and_zext_vec
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK: liveins: $xmm0, $xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES
+ ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[COPY]], killed [[V_SETALLONES]]
+ ; CHECK: [[PANDNrr:%[0-9]+]]:vr128 = PANDNrr [[COPY1]], killed [[PXORrr]]
+ ; CHECK: [[PANDrm:%[0-9]+]]:vr128 = PANDrm [[PANDNrr]], $rip, 1, $noreg, %const.0, $noreg :: (load 16 from constant-pool)
+ ; CHECK: $xmm0 = COPY [[PANDrm]]
+ ; CHECK: RET 0, $xmm0
+ %zextX = zext <2 x i1> %X to <2 x i32>
+ %zextY = zext <2 x i1> %Y to <2 x i32>
+ %notX = xor <2 x i32> %zextX, <i32 1, i32 1>
+ %notY = xor <2 x i32> %zextY, <i32 1, i32 1>
+ %and = and <2 x i32> %notX, %notY
+ ret <2 x i32> %and
+}
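+
+; PR28476: the i1 results of two 'icmp ne' are and'ed together, zext'ed
+; and xor'ed with 1, i.e. the function computes !(x != 0 && y != 0).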
+define i32 @PR28476(i32 %x, i32 %y) {
+ ; CHECK-LABEL: name: PR28476
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK: liveins: $edi, $esi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: TEST32rr [[COPY1]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
+ ; CHECK: TEST32rr [[COPY]], [[COPY]], implicit-def $eflags
+ ; CHECK: [[SETNEr1:%[0-9]+]]:gr8 = SETNEr implicit $eflags
+ ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETNEr]], killed [[SETNEr1]], implicit-def dead $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 killed [[AND8rr]]
+ ; CHECK: [[XOR32ri8_:%[0-9]+]]:gr32 = XOR32ri8 [[MOVZX32rr8_]], 1, implicit-def dead $eflags
+ ; CHECK: $eax = COPY [[XOR32ri8_]]
+ ; CHECK: RET 0, $eax
+ %cmp0 = icmp ne i32 %x, 0
+ %cmp1 = icmp ne i32 %y, 0
+ %and = and i1 %cmp0, %cmp1
+ %zext = zext i1 %and to i32
+ %cond = xor i32 %zext, 1
+ ret i32 %cond
+}
+
+; ~(~(a | b) | (a & b)) --> (a | b) & ~(a & b) -> a ^ b
+
+define i32 @demorgan_plus_and_to_xor(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: demorgan_plus_and_to_xor
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK: liveins: $edi, $esi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[COPY]], [[COPY1]], implicit-def dead $eflags
+ ; CHECK: [[NOT32r:%[0-9]+]]:gr32 = NOT32r [[OR32rr]]
+ ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY]], [[COPY1]], implicit-def dead $eflags
+ ; CHECK: [[OR32rr1:%[0-9]+]]:gr32 = OR32rr [[AND32rr]], killed [[NOT32r]], implicit-def dead $eflags
+ ; CHECK: [[NOT32r1:%[0-9]+]]:gr32 = NOT32r [[OR32rr1]]
+ ; CHECK: $eax = COPY [[NOT32r1]]
+ ; CHECK: RET 0, $eax
+ %or = or i32 %b, %a
+ %notor = xor i32 %or, -1
+ %and = and i32 %b, %a
+ %or2 = or i32 %and, %notor
+ %not = xor i32 %or2, -1
+ ret i32 %not
+}
+
+define <4 x i32> @demorgan_plus_and_to_xor_vec(<4 x i32> %a, <4 x i32> %b) {
+ ; CHECK-LABEL: name: demorgan_plus_and_to_xor_vec
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK: liveins: $xmm0, $xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[PORrr:%[0-9]+]]:vr128 = PORrr [[COPY1]], [[COPY]]
+ ; CHECK: [[V_SETALLONES:%[0-9]+]]:vr128 = V_SETALLONES
+ ; CHECK: [[PXORrr:%[0-9]+]]:vr128 = PXORrr [[PORrr]], [[V_SETALLONES]]
+ ; CHECK: [[PANDrr:%[0-9]+]]:vr128 = PANDrr [[COPY1]], [[COPY]]
+ ; CHECK: [[PORrr1:%[0-9]+]]:vr128 = PORrr [[PANDrr]], killed [[PXORrr]]
+ ; CHECK: [[PXORrr1:%[0-9]+]]:vr128 = PXORrr [[PORrr1]], [[V_SETALLONES]]
+ ; CHECK: $xmm0 = COPY [[PXORrr1]]
+ ; CHECK: RET 0, $xmm0
+ %or = or <4 x i32> %a, %b
+ %notor = xor <4 x i32> %or, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ %and = and <4 x i32> %a, %b
+ %or2 = or <4 x i32> %and, %notor
+ %not = xor <4 x i32> %or2, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ ret <4 x i32> %not
+}
Index: test/Transforms/InstCombine/demorgan-extra.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/demorgan-extra.ll
@@ -0,0 +1,180 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; There is an identical twin test in test/CodeGen/{X86,AArch64}/demorgan-extra.ll
+; Please keep them in sync! :)
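+
+; On top of the basic ~(~A & B) --> (A | ~B) fold, these tests cover
+; operand commutation, a %B that is itself produced by an 'xor', xor
+; constants other than -1 (which do not form a 'not' at all), and extra
+; uses of the intermediate values.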
+
+declare void @use32(i32)
+declare i32 @gen32()
+
+; ============================================================================ ;
+
+; ~(~A & B) --> (A | ~B)
+
+define i32 @demorgan_nand(i32 %A, i32 %B) {
+; CHECK-LABEL: @demorgan_nand(
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
+; CHECK-NEXT: [[NOTC:%.*]] = or i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[NOTC]]
+;
+ %notx = xor i32 %A, -1
+ %c = and i32 %notx, %B
+ %notc = xor i32 %c, -1
+ ret i32 %notc
+}
+
+define <2 x i32> @demorgan_nand_vec(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: @demorgan_nand_vec(
+; CHECK-NEXT: [[B_NOT:%.*]] = xor <2 x i32> [[B:%.*]], <i32 -1, i32 -1>
+; CHECK-NEXT: [[NOTC:%.*]] = or <2 x i32> [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[NOTC]]
+;
+ %notx = xor <2 x i32> %A, <i32 -1, i32 -1>
+ %c = and <2 x i32> %notx, %B
+ %notc = xor <2 x i32> %c, <i32 -1, i32 -1>
+ ret <2 x i32> %notc
+}
+
+define <3 x i32> @demorgan_nand_vec_undef(<3 x i32> %A, <3 x i32> %B) {
+; CHECK-LABEL: @demorgan_nand_vec_undef(
+; CHECK-NEXT: [[B_NOT:%.*]] = xor <3 x i32> [[B:%.*]], <i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: [[NOTC:%.*]] = or <3 x i32> [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret <3 x i32> [[NOTC]]
+;
+ %notx = xor <3 x i32> %A, <i32 -1, i32 undef, i32 -1>
+ %c = and <3 x i32> %notx, %B
+ %notc = xor <3 x i32> %c, <i32 -1, i32 undef, i32 -1>
+ ret <3 x i32> %notc
+}
+
+define <4 x i32> @demorgan_nand_vec_128bit(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: @demorgan_nand_vec_128bit(
+; CHECK-NEXT: [[B_NOT:%.*]] = xor <4 x i32> [[B:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: [[NOTC:%.*]] = or <4 x i32> [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret <4 x i32> [[NOTC]]
+;
+ %notx = xor <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = and <4 x i32> %notx, %B
+ %notc = xor <4 x i32> %c, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %notc
+}
+
+define <4 x i32> @demorgan_nand_vec_128bit_undef(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: @demorgan_nand_vec_128bit_undef(
+; CHECK-NEXT: [[B_NOT:%.*]] = xor <4 x i32> [[B:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: [[NOTC:%.*]] = or <4 x i32> [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret <4 x i32> [[NOTC]]
+;
+ %notx = xor <4 x i32> %A, <i32 -1, i32 -1, i32 undef, i32 -1>
+ %c = and <4 x i32> %notx, %B
+ %notc = xor <4 x i32> %c, <i32 -1, i32 -1, i32 undef, i32 -1>
+ ret <4 x i32> %notc
+}
+
+define i32 @demorgan_nand_commutative(i32 %A) {
+; CHECK-LABEL: @demorgan_nand_commutative(
+; CHECK-NEXT: [[B:%.*]] = call i32 @gen32()
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B]], -1
+; CHECK-NEXT: [[NOTC:%.*]] = or i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[NOTC]]
+;
+ %B = call i32 @gen32()
+ %notx = xor i32 %A, -1
+ %c = and i32 %B, %notx ; swapped
+ %notc = xor i32 %c, -1
+ ret i32 %notc
+}
+
+define i32 @demorgan_nand_extraxor(i32 %A) {
+; CHECK-LABEL: @demorgan_nand_extraxor(
+; CHECK-NEXT: [[V:%.*]] = call i32 @gen32()
+; CHECK-NEXT: [[Z:%.*]] = call i32 @gen32()
+; CHECK-NEXT: [[B:%.*]] = xor i32 [[V]], [[Z]]
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B]], -1
+; CHECK-NEXT: [[NOTC:%.*]] = or i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[NOTC]]
+;
+ %V = call i32 @gen32()
+ %Z = call i32 @gen32()
+ %B = xor i32 %V, %Z ; not with -1
+ %notx = xor i32 %A, -1
+ %c = and i32 %notx, %B
+ %notc = xor i32 %c, -1
+ ret i32 %notc
+}
+
+define i32 @demorgan_nand_extraxor_commutative(i32 %A) {
+; CHECK-LABEL: @demorgan_nand_extraxor_commutative(
+; CHECK-NEXT: [[V:%.*]] = call i32 @gen32()
+; CHECK-NEXT: [[Z:%.*]] = call i32 @gen32()
+; CHECK-NEXT: [[B:%.*]] = xor i32 [[V]], [[Z]]
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B]], -1
+; CHECK-NEXT: [[NOTC:%.*]] = or i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[NOTC]]
+;
+ %V = call i32 @gen32()
+ %Z = call i32 @gen32()
+ %B = xor i32 %V, %Z ; not with -1
+ %notx = xor i32 %A, -1
+ %c = and i32 %B, %notx ; swapped
+ %notc = xor i32 %c, -1
+ ret i32 %notc
+}
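+
+; Negative tests: an 'xor' with a constant other than -1 only flips some
+; of the bits, it is not a 'not', so the fold must not fire here.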
+
+define i32 @demorgan_nand_badxor0(i32 %A, i32 %B) {
+; CHECK-LABEL: @demorgan_nand_badxor0(
+; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[A:%.*]], 1
+; CHECK-NEXT: [[C:%.*]] = and i32 [[NOTX]], [[B:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = xor i32 [[C]], -1
+; CHECK-NEXT: ret i32 [[NOTC]]
+;
+ %notx = xor i32 %A, 1 ; not -1
+ %c = and i32 %notx, %B
+ %notc = xor i32 %c, -1
+ ret i32 %notc
+}
+
+define i32 @demorgan_nand_badxor1(i32 %A, i32 %B) {
+; CHECK-LABEL: @demorgan_nand_badxor1(
+; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[C:%.*]] = and i32 [[NOTX]], [[B:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = xor i32 [[C]], 1
+; CHECK-NEXT: ret i32 [[NOTC]]
+;
+ %notx = xor i32 %A, -1
+ %c = and i32 %notx, %B
+ %notc = xor i32 %c, 1 ; not -1
+ ret i32 %notc
+}
+
+define i32 @demorgan_nand_oneuse0(i32 %A, i32 %B) {
+; CHECK-LABEL: @demorgan_nand_oneuse0(
+; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
+; CHECK-NEXT: [[NOTC:%.*]] = or i32 [[B_NOT]], [[A]]
+; CHECK-NEXT: call void @use32(i32 [[NOTX]])
+; CHECK-NEXT: ret i32 [[NOTC]]
+;
+ %notx = xor i32 %A, -1
+ %c = and i32 %notx, %B
+ %notc = xor i32 %c, -1
+ call void @use32(i32 %notx)
+ ret i32 %notc
+}
+
+define i32 @demorgan_nand_oneuse1(i32 %A, i32 %B) {
+; CHECK-LABEL: @demorgan_nand_oneuse1(
+; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[C:%.*]] = and i32 [[NOTX]], [[B:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = xor i32 [[C]], -1
+; CHECK-NEXT: call void @use32(i32 [[C]])
+; CHECK-NEXT: ret i32 [[NOTC]]
+;
+ %notx = xor i32 %A, -1
+ %c = and i32 %notx, %B
+ %notc = xor i32 %c, -1
+ call void @use32(i32 %c)
+ ret i32 %notc
+}
+
+; ============================================================================ ;
Index: test/Transforms/InstCombine/demorgan.ll
===================================================================
--- test/Transforms/InstCombine/demorgan.ll
+++ test/Transforms/InstCombine/demorgan.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
+; There is an identical twin test in test/CodeGen/{X86,AArch64}/demorgan.ll
+; Please keep them in sync! :)
+
 ; (~A | ~B) == ~(A & B)
 
 define i43 @demorgan_or_apint1(i43 %A, i43 %B) {