Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -659,7 +659,6 @@
   SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -549,6 +549,8 @@
   // We combine OR nodes for bitfield operations.
   setTargetDAGCombine(ISD::OR);
+  // Try to create BICs for vector ANDs.
+  setTargetDAGCombine(ISD::AND);
 
   // Vector add and sub nodes may conceal a high-half opportunity.
   // Also, try to fold ADD into CSINC/CSINV..
@@ -797,7 +799,6 @@
     setOperationAction(ISD::SRA, VT, Custom);
     setOperationAction(ISD::SRL, VT, Custom);
     setOperationAction(ISD::SHL, VT, Custom);
-    setOperationAction(ISD::AND, VT, Custom);
     setOperationAction(ISD::OR, VT, Custom);
     setOperationAction(ISD::SETCC, VT, Custom);
     setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
@@ -2938,8 +2939,6 @@
     return LowerCTPOP(Op, DAG);
   case ISD::FCOPYSIGN:
     return LowerFCOPYSIGN(Op, DAG);
-  case ISD::AND:
-    return LowerVectorAND(Op, DAG);
   case ISD::OR:
     return LowerVectorOR(Op, DAG);
   case ISD::XOR:
@@ -6920,46 +6919,6 @@
   return SDValue();
 }
 
-SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
-                                              SelectionDAG &DAG) const {
-  SDValue LHS = Op.getOperand(0);
-  EVT VT = Op.getValueType();
-
-  BuildVectorSDNode *BVN =
-      dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
-  if (!BVN) {
-    // AND commutes, so try swapping the operands.
-    LHS = Op.getOperand(1);
-    BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
-  }
-  if (!BVN)
-    return Op;
-
-  APInt DefBits(VT.getSizeInBits(), 0);
-  APInt UndefBits(VT.getSizeInBits(), 0);
-  if (resolveBuildVector(BVN, DefBits, UndefBits)) {
-    SDValue NewOp;
-
-    // We only have BIC vector immediate instruction, which is and-not.
-    DefBits = ~DefBits;
-    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, Op, DAG,
-                                    DefBits, &LHS)) ||
-        (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, Op, DAG,
-                                    DefBits, &LHS)))
-      return NewOp;
-
-    UndefBits = ~UndefBits;
-    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, Op, DAG,
-                                    UndefBits, &LHS)) ||
-        (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, Op, DAG,
-                                    UndefBits, &LHS)))
-      return NewOp;
-  }
-
-  // We can always fall back to a non-immediate AND.
-  return Op;
-}
-
 // Specialized code to quickly find if PotentialBVec is a BuildVector that
 // consists of only the same constant int value, returned in reference arg
 // ConstVal
@@ -9431,6 +9390,43 @@
   return SDValue();
 }
 
+static SDValue performANDCombine(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI) {
+  SelectionDAG &DAG = DCI.DAG;
+  SDValue LHS = N->getOperand(0);
+  EVT VT = N->getValueType(0);
+  if (!VT.isVector() || !DAG.getTargetLoweringInfo().isTypeLegal(VT))
+    return SDValue();
+
+  BuildVectorSDNode *BVN =
+      dyn_cast<BuildVectorSDNode>(N->getOperand(1).getNode());
+  if (!BVN)
+    return SDValue();
+
+  APInt DefBits(VT.getSizeInBits(), 0);
+  APInt UndefBits(VT.getSizeInBits(), 0);
+  if (resolveBuildVector(BVN, DefBits, UndefBits)) {
+    SDValue NewOp;
+
+    // We only have BIC vector immediate instruction, which is and-not.
+    DefBits = ~DefBits;
+    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
+                                    DefBits, &LHS)) ||
+        (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
+                                    DefBits, &LHS)))
+      return NewOp;
+
+    UndefBits = ~UndefBits;
+    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
+                                    UndefBits, &LHS)) ||
+        (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
+                                    UndefBits, &LHS)))
+      return NewOp;
+  }
+
+  return SDValue();
+}
+
 static SDValue performSRLCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
   SelectionDAG &DAG = DCI.DAG;
@@ -11281,6 +11277,8 @@
     return performFDivCombine(N, DAG, DCI, Subtarget);
   case ISD::OR:
     return performORCombine(N, DCI, Subtarget);
+  case ISD::AND:
+    return performANDCombine(N, DCI);
   case ISD::SRL:
     return performSRLCombine(N, DCI);
   case ISD::INTRINSIC_WO_CHAIN:
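Not part of the patch, but for illustration: a minimal IR sketch of the pattern the new combine targets (the function name is hypothetical). The per-lane mask -256 (0xffffff00) inverts to 255, which fits the 8-bit shifted modified-immediate form that tryAdvSIMDModImm32 checks for, so the AND should select to a single immediate-form BIC such as "bic v0.4s, #255" instead of materializing the mask in a register:

  define <4 x i32> @and_to_bic(<4 x i32> %x) {
    ; mask -256 = 0xffffff00 per lane; ~mask = 0xff is a valid BIC immediate
    %r = and <4 x i32> %x, <i32 -256, i32 -256, i32 -256, i32 -256>
    ret <4 x i32> %r
  }
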
Index: llvm/test/CodeGen/AArch64/sat-add.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sat-add.ll
+++ llvm/test/CodeGen/AArch64/sat-add.ll
@@ -365,7 +365,6 @@
 ; CHECK-NEXT:    add v1.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    cmhi v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <16 x i8> %x,
@@ -382,7 +381,6 @@
 ; CHECK-NEXT:    add v1.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    cmhi v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <16 x i8> %x,
@@ -412,7 +410,6 @@
 ; CHECK-NEXT:    add v1.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    cmhi v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <8 x i16> %x,
@@ -429,7 +426,6 @@
 ; CHECK-NEXT:    add v1.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    cmhi v0.8h, v0.8h, v2.8h
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <8 x i16> %x,
@@ -459,7 +455,6 @@
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    cmhi v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <4 x i32> %x,
@@ -476,7 +471,6 @@
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    cmhi v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <4 x i32> %x,
@@ -510,7 +504,6 @@
 ; CHECK-NEXT:    add v1.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <2 x i64> %x,
@@ -529,7 +522,6 @@
 ; CHECK-NEXT:    add v1.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <2 x i64> %x,
@@ -558,7 +550,6 @@
 ; CHECK-NEXT:    add v1.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    cmhi v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <16 x i8> %x, %y
@@ -574,7 +565,6 @@
 ; CHECK-NEXT:    add v1.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    cmhi v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <16 x i8> %y,
@@ -604,7 +594,6 @@
 ; CHECK-NEXT:    add v1.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    cmhi v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <8 x i16> %x, %y
@@ -620,7 +609,6 @@
 ; CHECK-NEXT:    add v1.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    cmhi v0.8h, v0.8h, v2.8h
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <8 x i16> %y,
@@ -650,7 +638,6 @@
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    cmhi v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <4 x i32> %x, %y
@@ -666,7 +653,6 @@
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    cmhi v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <4 x i32> %y,
@@ -697,7 +683,6 @@
 ; CHECK-NEXT:    add v1.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a = add <2 x i64> %x, %y
@@ -713,7 +698,6 @@
 ; CHECK-NEXT:    add v1.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <2 x i64> %y,
Index: llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
===================================================================
--- llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -405,7 +405,6 @@
 ; CHECK-NEXT:    add v1.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    bic v1.16b, v1.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %z = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %x, <2 x i64> %y)
@@ -420,9 +419,7 @@
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
 ; CHECK-NEXT:    bic v2.16b, v2.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    bic v3.16b, v3.16b, v1.16b
-; CHECK-NEXT:    bic v1.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    ret
@@ -442,13 +439,9 @@
 ; CHECK-NEXT:    cmhi v2.2d, v2.2d, v6.2d
 ; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
 ; CHECK-NEXT:    bic v4.16b, v4.16b, v0.16b
-; CHECK-NEXT:    bic v0.4s, #0
 ; CHECK-NEXT:    bic v5.16b, v5.16b, v1.16b
-; CHECK-NEXT:    bic v1.4s, #0
 ; CHECK-NEXT:    bic v6.16b, v6.16b, v2.16b
-; CHECK-NEXT:    bic v2.4s, #0
 ; CHECK-NEXT:    bic v7.16b, v7.16b, v3.16b
-; CHECK-NEXT:    bic v3.4s, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v4.16b
 ; CHECK-NEXT:    orr v1.16b, v1.16b, v5.16b
 ; CHECK-NEXT:    orr v2.16b, v2.16b, v6.16b
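A note on the test updates, inferred from the deleted code and CHECK lines: "bic vN.4s, #0" clears no bits, so every removed instruction was a no-op. The old custom lowering fired on every vector AND during legalization, so an AND with an all-ones mask (produced while expanding these saturating-add select patterns) was rewritten to a BIC with a zero immediate before any generic fold could remove it. Run as a DAG combine instead, the transform sits alongside the generic combines, which presumably fold the all-ones AND away first, so the redundant BIC is never created.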