diff --git a/llvm/test/CodeGen/X86/vec_no-common-bits.ll b/llvm/test/CodeGen/X86/vec_no-common-bits.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vec_no-common-bits.ll
@@ -0,0 +1,161 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK
+
+; In the following patterns, the lhs and rhs of the "or" instruction have no common bits.
+; Therefore, the "add" and "or" instructions are equivalent.
+
+define <2 x i32> @or_and_and_rhs_neg_vec_i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %z) {
+; CHECK-LABEL: or_and_and_rhs_neg_vec_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pand %xmm1, %xmm2
+; CHECK-NEXT:    pandn %xmm0, %xmm1
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    por %xmm1, %xmm0
+; CHECK-NEXT:    paddd %xmm2, %xmm1
+; CHECK-NEXT:    psubd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %and1 = and <2 x i32> %z, %y
+  %xor = xor <2 x i32> %y, <i32 -1, i32 -1>
+  %and2 = and <2 x i32> %x, %xor
+  %or = or <2 x i32> %and1, %and2
+  %add = add <2 x i32> %and1, %and2
+  %sub = sub <2 x i32> %or, %add
+  ret <2 x i32> %sub
+}
+
+define <2 x i32> @or_and_and_lhs_neg_vec_i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %z) {
+; CHECK-LABEL: or_and_and_lhs_neg_vec_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pand %xmm1, %xmm2
+; CHECK-NEXT:    pandn %xmm0, %xmm1
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    por %xmm1, %xmm0
+; CHECK-NEXT:    paddd %xmm2, %xmm1
+; CHECK-NEXT:    psubd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %and1 = and <2 x i32> %z, %y
+  %xor = xor <2 x i32> %y, <i32 -1, i32 -1>
+  %and2 = and <2 x i32> %xor, %x
+  %or = or <2 x i32> %and1, %and2
+  %add = add <2 x i32> %and1, %and2
+  %sub = sub <2 x i32> %or, %add
+  ret <2 x i32> %sub
+}
+
+define <2 x i32> @or_and_rhs_neg_and_vec_i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %z) {
+; CHECK-LABEL: or_and_rhs_neg_and_vec_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pand %xmm1, %xmm0
+; CHECK-NEXT:    pandn %xmm2, %xmm1
+; CHECK-NEXT:    movdqa %xmm1, %xmm2
+; CHECK-NEXT:    por %xmm0, %xmm2
+; CHECK-NEXT:    paddd %xmm0, %xmm1
+; CHECK-NEXT:    psubd %xmm1, %xmm2
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %xor = xor <2 x i32> %y, <i32 -1, i32 -1>
+  %and1 = and <2 x i32> %z, %xor
+  %and2 = and <2 x i32> %x, %y
+  %or = or <2 x i32> %and1, %and2
+  %add = add <2 x i32> %and1, %and2
+  %sub = sub <2 x i32> %or, %add
+  ret <2 x i32> %sub
+}
+
+define <2 x i32> @or_and_lhs_neg_and_vec_i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %z) {
+; CHECK-LABEL: or_and_lhs_neg_and_vec_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pand %xmm1, %xmm0
+; CHECK-NEXT:    pandn %xmm2, %xmm1
+; CHECK-NEXT:    movdqa %xmm1, %xmm2
+; CHECK-NEXT:    por %xmm0, %xmm2
+; CHECK-NEXT:    paddd %xmm0, %xmm1
+; CHECK-NEXT:    psubd %xmm1, %xmm2
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %xor = xor <2 x i32> %y, <i32 -1, i32 -1>
+  %and1 = and <2 x i32> %xor, %z
+  %and2 = and <2 x i32> %x, %y
+  %or = or <2 x i32> %and1, %and2
+  %add = add <2 x i32> %and1, %and2
+  %sub = sub <2 x i32> %or, %add
+  ret <2 x i32> %sub
+}
+
+define <2 x i64> @or_and_and_rhs_neg_vec_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %z) {
+; CHECK-LABEL: or_and_and_rhs_neg_vec_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pand %xmm1, %xmm2
+; CHECK-NEXT:    pandn %xmm0, %xmm1
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    por %xmm1, %xmm0
+; CHECK-NEXT:    paddq %xmm2, %xmm1
+; CHECK-NEXT:    psubq %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %and1 = and <2 x i64> %z, %y
+  %xor = xor <2 x i64> %y, <i64 -1, i64 -1>
+  %and2 = and <2 x i64> %x, %xor
+  %or = or <2 x i64> %and1, %and2
+  %add = add <2 x i64> %and1, %and2
+  %sub = sub <2 x i64> %or, %add
+  ret <2 x i64> %sub
+}
+
+define <2 x i64> @or_and_and_lhs_neg_vec_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %z) {
+; CHECK-LABEL: or_and_and_lhs_neg_vec_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pand %xmm1, %xmm2
+; CHECK-NEXT:    pandn %xmm0, %xmm1
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    por %xmm1, %xmm0
+; CHECK-NEXT:    paddq %xmm2, %xmm1
+; CHECK-NEXT:    psubq %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %and1 = and <2 x i64> %z, %y
+  %xor = xor <2 x i64> %y, <i64 -1, i64 -1>
+  %and2 = and <2 x i64> %xor, %x
+  %or = or <2 x i64> %and1, %and2
+  %add = add <2 x i64> %and1, %and2
+  %sub = sub <2 x i64> %or, %add
+  ret <2 x i64> %sub
+}
+
+define <2 x i64> @or_and_rhs_neg_and_vec_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %z) {
+; CHECK-LABEL: or_and_rhs_neg_and_vec_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pand %xmm1, %xmm0
+; CHECK-NEXT:    pandn %xmm2, %xmm1
+; CHECK-NEXT:    movdqa %xmm1, %xmm2
+; CHECK-NEXT:    por %xmm0, %xmm2
+; CHECK-NEXT:    paddq %xmm0, %xmm1
+; CHECK-NEXT:    psubq %xmm1, %xmm2
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %xor = xor <2 x i64> %y, <i64 -1, i64 -1>
+  %and1 = and <2 x i64> %z, %xor
+  %and2 = and <2 x i64> %x, %y
+  %or = or <2 x i64> %and1, %and2
+  %add = add <2 x i64> %and1, %and2
+  %sub = sub <2 x i64> %or, %add
+  ret <2 x i64> %sub
+}
+
+define <2 x i64> @or_and_lhs_neg_and_vec_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %z) {
+; CHECK-LABEL: or_and_lhs_neg_and_vec_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pand %xmm1, %xmm0
+; CHECK-NEXT:    pandn %xmm2, %xmm1
+; CHECK-NEXT:    movdqa %xmm1, %xmm2
+; CHECK-NEXT:    por %xmm0, %xmm2
+; CHECK-NEXT:    paddq %xmm0, %xmm1
+; CHECK-NEXT:    psubq %xmm1, %xmm2
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %xor = xor <2 x i64> %y, <i64 -1, i64 -1>
+  %and1 = and <2 x i64> %xor, %z
+  %and2 = and <2 x i64> %x, %y
+  %or = or <2 x i64> %and1, %and2
+  %add = add <2 x i64> %and1, %and2
+  %sub = sub <2 x i64> %or, %add
+  ret <2 x i64> %sub
+}
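
Note on the tested identity: the lead comment in the test relies on the fact that OR and ADD agree whenever the two operands share no set bits, because disjoint operands generate no carries, so the final "sub" is always zero. The C program below is only an illustrative sketch of that arithmetic identity, not part of the patch and not a claim about LLVM's folding; the helper name and constants are invented for demonstration, and it mirrors the masked-value pattern above, where one operand is masked by %y and the other by ~%y.

/* Illustrative sketch only: checks that (a | b) == (a + b) when a and b
 * share no set bits, matching the (z & y) / (x & ~y) pattern in the tests.
 * The function name and test constants are hypothetical. */
#include <assert.h>
#include <stdint.h>

static uint32_t or_minus_add(uint32_t x, uint32_t y, uint32_t z) {
  uint32_t a = z & y;        /* bits selected inside the mask y  */
  uint32_t b = x & ~y;       /* bits selected outside the mask y */
  return (a | b) - (a + b);  /* disjoint operands => no carries => always 0 */
}

int main(void) {
  assert(or_minus_add(0x12345678u, 0x0F0F0F0Fu, 0xDEADBEEFu) == 0);
  assert(or_minus_add(0xFFFFFFFFu, 0x00000000u, 0xFFFFFFFFu) == 0);
  assert(or_minus_add(0x00000000u, 0xFFFFFFFFu, 0x13579BDFu) == 0);
  return 0;
}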