diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -46377,6 +46377,60 @@
   return Ret;
 }
 
+static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
+                                   SDValue And1_L, SDValue And1_R, SDLoc DL,
+                                   SelectionDAG &DAG) {
+  if (!isBitwiseNot(And0_L, true))
+    return SDValue();
+  SDValue NotOp = And0_L->getOperand(0);
+  if (NotOp == And1_R) {
+    std::swap(And1_R, And1_L);
+  }
+  // (~(NotOp) & And0_R) | (NotOp & And1_R)
+  // --> ((And0_R ^ And1_R) & NotOp) ^ And1_R
+  if (NotOp == And1_L) {
+    EVT VT = And1_L->getValueType(0);
+    SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
+    SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
+    SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
+    SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
+    return Xor1;
+  }
+  return SDValue();
+}
+
+/// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
+/// equivalent `((x ^ y) & m) ^ y` pattern.
+/// This is typically a better representation for targets without a fused
+/// "and-not" operation. This function is intended to be called from a
+/// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
+static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
+  // Note that masked-merge variants using XOR or ADD expressions are
+  // normalized to OR by InstCombine so we only check for OR.
+  assert(Node->getOpcode() == ISD::OR);
+  SDValue N0 = Node->getOperand(0);
+  if (N0->getOpcode() != ISD::AND)
+    return SDValue();
+  SDValue N1 = Node->getOperand(1);
+  if (N1->getOpcode() != ISD::AND)
+    return SDValue();
+
+  SDLoc DL(Node);
+  SDValue N00 = N0->getOperand(0);
+  SDValue N01 = N0->getOperand(1);
+  SDValue N10 = N1->getOperand(0);
+  SDValue N11 = N1->getOperand(1);
+  if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
+    return Result;
+  if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
+    return Result;
+  if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
+    return Result;
+  if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
+    return Result;
+  return SDValue();
+}
+
 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
@@ -46470,6 +46524,12 @@
     return Res;
   }
 
+  // We should fold "masked merge" patterns when `andn` is not available.
+  if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1) {
+    if (SDValue R = foldMaskedMerge(N, DAG))
+      return R;
+  }
+
   return SDValue();
 }
 
diff --git a/llvm/test/CodeGen/X86/fold-masked-merge.ll b/llvm/test/CodeGen/X86/fold-masked-merge.ll
--- a/llvm/test/CodeGen/X86/fold-masked-merge.ll
+++ b/llvm/test/CodeGen/X86/fold-masked-merge.ll
@@ -8,11 +8,10 @@
 define i32 @masked_merge0(i32 %a0, i32 %a1, i32 %a2) {
 ; CHECK-LABEL: masked_merge0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    andl %edi, %esi
-; CHECK-NEXT:    notl %eax
-; CHECK-NEXT:    andl %edx, %eax
-; CHECK-NEXT:    orl %esi, %eax
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    xorl %edx, %eax
+; CHECK-NEXT:    andl %edi, %eax
+; CHECK-NEXT:    xorl %edx, %eax
 ; CHECK-NEXT:    retq
 ;
 ; BMI-LABEL: masked_merge0:
@@ -56,11 +55,10 @@
 define i8 @masked_merge2(i8 %a0, i8 %a1, i8 %a2) {
 ; CHECK-LABEL: masked_merge2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    notb %al
-; CHECK-NEXT:    andb %sil, %al
-; CHECK-NEXT:    andb %dil, %sil
-; CHECK-NEXT:    orb %sil, %al
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    xorb %sil, %al
+; CHECK-NEXT:    andb %dil, %al
+; CHECK-NEXT:    xorb %sil, %al
 ; CHECK-NEXT:    retq
 ;
 ; BMI-LABEL: masked_merge2:
@@ -81,13 +79,12 @@
 define i64 @masked_merge3(i64 %a0, i64 %a1, i64 %a2) {
 ; CHECK-LABEL: masked_merge3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    notq %rsi
+; CHECK-NEXT:    movq %rsi, %rax
 ; CHECK-NEXT:    notq %rdx
-; CHECK-NEXT:    andq %rdi, %rsi
+; CHECK-NEXT:    xorq %rdx, %rax
 ; CHECK-NEXT:    notq %rax
-; CHECK-NEXT:    andq %rdx, %rax
-; CHECK-NEXT:    orq %rsi, %rax
+; CHECK-NEXT:    andq %rdi, %rax
+; CHECK-NEXT:    xorq %rdx, %rax
 ; CHECK-NEXT:    retq
 ;
 ; BMI-LABEL: masked_merge3:
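
Why this rewrite is profitable, in short: `(m & x) | (~m & y)` needs a NOT, two ANDs, and an OR (plus a register copy on x86), while `((x ^ y) & m) ^ y` needs only two XORs and one AND, as the updated CHECK lines above show. With BMI, `andn` already fuses the NOT and AND, so the combine is gated on `!Subtarget.hasBMI()`. The `ISD::FREEZE` of `And0_R` exists because that operand is used by both XORs in the replacement, and freezing keeps the two uses from observing different refinements of a poison/undef value. Below is a minimal standalone sketch (not part of the patch) that exhaustively checks the underlying bitwise identity over all i8 operand triples:

// check_masked_merge.cpp -- illustration only, independent of LLVM.
#include <cassert>
#include <cstdint>

int main() {
  // Exhaustively verify (m & x) | (~m & y) == ((x ^ y) & m) ^ y for i8.
  for (unsigned m = 0; m < 256; ++m)
    for (unsigned x = 0; x < 256; ++x)
      for (unsigned y = 0; y < 256; ++y) {
        uint8_t Merge = (m & x) | (~m & y);  // NOT + two ANDs + OR
        uint8_t Folded = ((x ^ y) & m) ^ y;  // two XORs + one AND
        assert(Merge == Folded && "masked-merge identity must hold");
      }
  return 0;
}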