diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll
--- a/llvm/test/CodeGen/X86/combine-add.ll
+++ b/llvm/test/CodeGen/X86/combine-add.ll
@@ -510,3 +510,65 @@
   %r = add i1 %cmpyz, true
   ret i1 %r
 }
+
+define <2 x i64> @add_vec_x_notx(<2 x i64> %v0) nounwind {
+; SSE-LABEL: add_vec_x_notx:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE-NEXT:    pxor %xmm0, %xmm1
+; SSE-NEXT:    paddq %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: add_vec_x_notx:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
+  %y = add <2 x i64> %v0, %x
+  ret <2 x i64> %y
+}
+
+define <2 x i64> @add_vec_notx_x(<2 x i64> %v0) nounwind {
+; SSE-LABEL: add_vec_notx_x:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE-NEXT:    pxor %xmm0, %xmm1
+; SSE-NEXT:    paddq %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: add_vec_notx_x:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
+  %y = add <2 x i64> %x, %v0
+  ret <2 x i64> %y
+}
+
+define i64 @add_x_notx(i64 %v0) nounwind {
+; CHECK-LABEL: add_x_notx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    notq %rax
+; CHECK-NEXT:    addq %rdi, %rax
+; CHECK-NEXT:    retq
+  %x = xor i64 %v0, -1
+  %y = add i64 %v0, %x
+  ret i64 %y
+}
+
+define i64 @add_notx_x(i64 %v0) nounwind {
+; CHECK-LABEL: add_notx_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    notq %rax
+; CHECK-NEXT:    addq %rdi, %rax
+; CHECK-NEXT:    retq
+  %x = xor i64 %v0, -1
+  %y = add i64 %x, %v0
+  ret i64 %y
+}
diff --git a/llvm/test/CodeGen/X86/combine-and.ll b/llvm/test/CodeGen/X86/combine-and.ll
--- a/llvm/test/CodeGen/X86/combine-and.ll
+++ b/llvm/test/CodeGen/X86/combine-and.ll
@@ -1176,3 +1176,57 @@
   %4 = and <4 x i32> %3, %a1
   ret <4 x i32> %4
 }
+
+define <2 x i64> @andnp_xx(<2 x i64> %v0) nounwind {
+; SSE-LABEL: andnp_xx:
+; SSE:       # %bb.0:
+; SSE-NEXT:    andnps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: andnp_xx:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vandnps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
+  %y = and <2 x i64> %v0, %x
+  ret <2 x i64> %y
+}
+
+define <2 x i64> @andnp_xx_2(<2 x i64> %v0) nounwind {
+; SSE-LABEL: andnp_xx_2:
+; SSE:       # %bb.0:
+; SSE-NEXT:    andnps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: andnp_xx_2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vandnps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
+  %y = and <2 x i64> %x, %v0
+  ret <2 x i64> %y
+}
+
+define i64 @andn_xx(i64 %v0) nounwind {
+; CHECK-LABEL: andn_xx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    notq %rax
+; CHECK-NEXT:    andq %rdi, %rax
+; CHECK-NEXT:    retq
+  %x = xor i64 %v0, -1
+  %y = and i64 %v0, %x
+  ret i64 %y
+}
+
+define i64 @andn_xx_2(i64 %v0) nounwind {
+; CHECK-LABEL: andn_xx_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    notq %rax
+; CHECK-NEXT:    andq %rdi, %rax
+; CHECK-NEXT:    retq
+  %x = xor i64 %v0, -1
+  %y = and i64 %x, %v0
+  ret i64 %y
+}