Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -155,6 +155,7 @@
   // Scalar logical instructions with not operand
   SBIC,
+  SEON,
 
   // Conditional compares. Operands: left,right,falsecc,cc,flags
   CCMP,
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2264,6 +2264,7 @@
     MAKE_CASE(AArch64ISD::SBCS)
     MAKE_CASE(AArch64ISD::ANDS)
     MAKE_CASE(AArch64ISD::SBIC)
+    MAKE_CASE(AArch64ISD::SEON)
    MAKE_CASE(AArch64ISD::CCMP)
    MAKE_CASE(AArch64ISD::CCMN)
    MAKE_CASE(AArch64ISD::FCCMP)
@@ -14971,6 +14972,49 @@
   return SDValue();
 }
 
+// ((X & Z) ^ Y) ^ Z --> eon Y, (X | ~Z)
+static SDValue performXORCombineWithNotOp(SDNode *N, SelectionDAG &DAG) {
+  EVT VT = N->getValueType(0);
+  if (VT != MVT::i32 && VT != MVT::i64)
+    return SDValue();
+
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  SDLoc DL(N);
+  SDValue X, Y, NewZ;
+  auto canGetNotFromXorAnd = [&](SDNode *N, SDValue Op0, SDValue Op1) {
+    if (Op0.getOpcode() != ISD::XOR || !Op0.hasOneUse())
+      return false;
+
+    Y = Op0.getOperand(1);
+    if (isa<ConstantSDNode>(Y))
+      return false;
+
+    SDValue AndVal = Op0.getOperand(0);
+    if (AndVal.getOpcode() != ISD::AND || !AndVal.hasOneUse())
+      return false;
+
+    if (Op1 == AndVal.getOperand(0)) {
+      NewZ = DAG.getNOT(DL, Op1, VT);
+      X = AndVal.getOperand(1);
+      return true;
+    } else if (Op1 == AndVal.getOperand(1)) {
+      NewZ = DAG.getNOT(DL, Op1, VT);
+      X = AndVal.getOperand(0);
+      return true;
+    }
+
+    return false;
+  };
+
+  if (canGetNotFromXorAnd(N, LHS, RHS) || canGetNotFromXorAnd(N, RHS, LHS)) {
+    SDValue OrVal = DAG.getNode(ISD::OR, DL, VT, X, NewZ);
+    return DAG.getNode(AArch64ISD::SEON, DL, VT, Y, OrVal);
+  }
+
+  return SDValue();
+}
 
 static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
@@ -14978,6 +15022,9 @@
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
+  if (SDValue R = performXORCombineWithNotOp(N, DAG))
+    return R;
+
   return foldVectorXorShiftIntoCmp(N, DAG, Subtarget);
 }
Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -614,6 +614,7 @@
 def AArch64fccmp : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;
 def AArch64sbic : SDNode<"AArch64ISD::SBIC", SDTIntBinOp>;
+def AArch64seon : SDNode<"AArch64ISD::SEON", SDTIntBinOp>;
 
 def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;
@@ -2240,6 +2241,7 @@
 }
 
 defm : LogicalRegPat;
+defm : LogicalRegPat;
 
 //===----------------------------------------------------------------------===//
Index: llvm/test/CodeGen/AArch64/logical-op-with-not.ll
===================================================================
--- llvm/test/CodeGen/AArch64/logical-op-with-not.ll
+++ llvm/test/CodeGen/AArch64/logical-op-with-not.ll
@@ -197,9 +197,8 @@
 define i64 @_Z6or_eon(i64 %0, i64 %1) {
 ; CHECK-LABEL: _Z6or_eon:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and x8, x0, #0xfffffffffffff000
-; CHECK-NEXT:    eor x8, x8, x1
-; CHECK-NEXT:    eor x0, x8, #0xfffffffffffff000
+; CHECK-NEXT:    orr x8, x0, #0xfff
+; CHECK-NEXT:    eon x0, x1, x8
 ; CHECK-NEXT:    ret
   %3 = and i64 %0, -4096
   %4 = xor i64 %3, %1
@@ -234,3 +233,15 @@
   %6 = xor i64 %5, %1
   ret i64 %6
 }
+
+define i64 @_Z6or_eon4(i64 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: _Z6or_eon4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orn x8, x0, x2
+; CHECK-NEXT:    eon x0, x1, x8
+; CHECK-NEXT:    ret
+  %4 = and i64 %0, %2
+  %5 = xor i64 %4, %1
+  %6 = xor i64 %5, %2
+  ret i64 %6
+}
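
The combine rests on the bitwise identity ((x & z) ^ y) ^ z == y ^ ~(x | ~z): per bit, if z is 1 both sides reduce to ~(x ^ y), and if z is 0 both reduce to y. Since EON computes a ^ ~b (and ORN computes a | ~b), the right-hand side is exactly the orr/orn-plus-eon sequence the tests check for. The program below is a standalone sanity-check sketch, not part of the patch; it brute-tests the identity on random 64-bit values.

// Standalone sanity check (not part of the patch) for the identity behind the
// ((X & Z) ^ Y) ^ Z --> eon Y, (X | ~Z) combine.
#include <cstdint>
#include <cstdio>
#include <random>

int main() {
  std::mt19937_64 Rng(42);
  for (int I = 0; I < 1000000; ++I) {
    uint64_t X = Rng(), Y = Rng(), Z = Rng();
    uint64_t Original = ((X & Z) ^ Y) ^ Z; // pattern matched by the combine
    uint64_t Combined = Y ^ ~(X | ~Z);     // orn x8, X, Z ; eon x0, Y, x8
    if (Original != Combined) {
      std::printf("mismatch: x=%016llx y=%016llx z=%016llx\n",
                  (unsigned long long)X, (unsigned long long)Y,
                  (unsigned long long)Z);
      return 1;
    }
  }
  std::puts("identity holds for 1000000 random samples");
  return 0;
}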