diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6625,6 +6625,15 @@
       !DAG.isConstantIntBuildVectorOrConstantInt(N1))
     return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
 
+  // fold (and x, (xor x, -1)) -> 0, in either operand order
+  auto IsAndnXX = [](SDValue Op0, SDValue Op1) {
+    return Op0.getOpcode() == ISD::XOR && Op0.getOperand(0) == Op1 &&
+           isAllOnesOrAllOnesSplat(Op0.getOperand(1));
+  };
+  if (IsAndnXX(N0, N1) || IsAndnXX(N1, N0))
+    return DAG.getConstant(APInt::getZero(N1.getScalarValueSizeInBits()),
+                           SDLoc(N), N1.getValueType());
+
   // fold vector ops
   if (VT.isVector()) {
     if (SDValue FoldedVOp = SimplifyVBinOp(N, SDLoc(N)))
diff --git a/llvm/test/CodeGen/X86/andn-x-x.ll b/llvm/test/CodeGen/X86/andn-x-x.ll
--- a/llvm/test/CodeGen/X86/andn-x-x.ll
+++ b/llvm/test/CodeGen/X86/andn-x-x.ll
@@ -4,7 +4,7 @@
 define <2 x i64> @andnp_xx(<2 x i64> %v0) nounwind {
 ; CHECK-LABEL: andnp_xx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    andnps %xmm0, %xmm0
+; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
   %y = and <2 x i64> %v0, %x
@@ -14,7 +14,7 @@
 define <2 x i64> @andnp_xx_2(<2 x i64> %v0) nounwind {
 ; CHECK-LABEL: andnp_xx_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    andnps %xmm0, %xmm0
+; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
   %y = and <2 x i64> %x, %v0
@@ -24,9 +24,7 @@
 define i64 @andn_xx(i64 %v0) nounwind {
 ; CHECK-LABEL: andn_xx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    notq %rax
-; CHECK-NEXT:    andq %rdi, %rax
+; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    retq
   %x = xor i64 %v0, -1
   %y = and i64 %v0, %x
@@ -36,9 +34,7 @@
 define i64 @andn_xx_2(i64 %v0) nounwind {
 ; CHECK-LABEL: andn_xx_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    notq %rax
-; CHECK-NEXT:    andq %rdi, %rax
+; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    retq
   %x = xor i64 %v0, -1
   %y = and i64 %x, %v0
diff --git a/llvm/test/CodeGen/X86/fold-masked-merge.ll b/llvm/test/CodeGen/X86/fold-masked-merge.ll
--- a/llvm/test/CodeGen/X86/fold-masked-merge.ll
+++ b/llvm/test/CodeGen/X86/fold-masked-merge.ll
@@ -187,21 +187,11 @@
 
 ; not a masked merge: `not` operand must not be on same `and`.
 define i32 @not_a_masked_merge4(i32 %a0, i32 %a1, i32 %a2) {
-; NOBMI-LABEL: not_a_masked_merge4:
-; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    andl %esi, %edi
-; NOBMI-NEXT:    movl %edx, %eax
-; NOBMI-NEXT:    notl %eax
-; NOBMI-NEXT:    andl %edx, %eax
-; NOBMI-NEXT:    orl %edi, %eax
-; NOBMI-NEXT:    retq
-;
-; BMI-LABEL: not_a_masked_merge4:
-; BMI:       # %bb.0:
-; BMI-NEXT:    andl %esi, %edi
-; BMI-NEXT:    andnl %edx, %edx, %eax
-; BMI-NEXT:    orl %edi, %eax
-; BMI-NEXT:    retq
+; CHECK-LABEL: not_a_masked_merge4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl %esi, %eax
+; CHECK-NEXT:    retq
   %and0 = and i32 %a0, %a1
   %not = xor i32 %a2, -1
   %and1 = and i32 %not, %a2
diff --git a/llvm/test/CodeGen/X86/setcc-combine.ll b/llvm/test/CodeGen/X86/setcc-combine.ll
--- a/llvm/test/CodeGen/X86/setcc-combine.ll
+++ b/llvm/test/CodeGen/X86/setcc-combine.ll
@@ -244,14 +244,9 @@
 ; CHECK-LABEL: test_i1_uge:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl (%rdi), %eax
-; CHECK-NEXT:    movl %eax, %ecx
-; CHECK-NEXT:    xorb $1, %cl
-; CHECK-NEXT:    andb %cl, %al
-; CHECK-NEXT:    movzbl %al, %eax
-; CHECK-NEXT:    andl $1, %eax
-; CHECK-NEXT:    negq %rax
-; CHECK-NEXT:    andb $1, %cl
-; CHECK-NEXT:    movb %cl, (%rdi,%rax)
+; CHECK-NEXT:    notb %al
+; CHECK-NEXT:    andb $1, %al
+; CHECK-NEXT:    movb %al, (%rdi)
 ; CHECK-NEXT:    retq
   %L5 = load i1, ptr %A2
   %C3 = icmp ne i1 %L5, true