Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15458,21 +15458,32 @@
     return SDValue();
 
   SDLoc DL(N);
-  SDValue CCmp;
+  SDValue CCmp, Condition;
+  unsigned NZCV;
 
   if (N->getOpcode() == ISD::AND) {
     AArch64CC::CondCode InvCC0 = AArch64CC::getInvertedCondCode(CC0);
-    SDValue Condition = DAG.getConstant(InvCC0, DL, MVT_CC);
-    unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1);
-    SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
-    CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
-                       Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
+    Condition = DAG.getConstant(InvCC0, DL, MVT_CC);
+    NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1);
   } else {
-    SDLoc DL(N);
     AArch64CC::CondCode InvCC1 = AArch64CC::getInvertedCondCode(CC1);
-    SDValue Condition = DAG.getConstant(CC0, DL, MVT_CC);
-    unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1);
-    SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
+    Condition = DAG.getConstant(CC0, DL, MVT_CC);
+    NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1);
+  }
+
+  SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
+
+  auto *Op1 = dyn_cast<ConstantSDNode>(Cmp1.getOperand(1));
+  if (Op1 && Op1->getAPIntValue().isNegative() &&
+      Op1->getAPIntValue().sgt(-32)) {
+    // CCMP accepts an immediate in the range [0, 31]; if Op1 is a
+    // constant in the range [-31, -1], negate it and emit CCMN
+    // instead, avoiding the extra mov that materializes the constant.
+    SDValue AbsOp1 =
+        DAG.getConstant(Op1->getAPIntValue().abs(), DL, Op1->getValueType(0));
+    CCmp = DAG.getNode(AArch64ISD::CCMN, DL, MVT_CC, Cmp1.getOperand(0), AbsOp1,
+                       NZCVOp, Condition, Cmp0);
+  } else {
     CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
                        Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
   }
Index: llvm/test/CodeGen/AArch64/arm64-ccmp.ll
===================================================================
--- llvm/test/CodeGen/AArch64/arm64-ccmp.ll
+++ llvm/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -1194,14 +1194,11 @@
 }
 declare i32 @callee(i32)
 
-; FIXME: mov w8, #-2 + ccmp w1, w8, #0, eq
-; --> ccmn w1, #2, #0, eq
 define i1 @cmp_and_negative_const(i32 %0, i32 %1) {
 ; SDISEL-LABEL: cmp_and_negative_const:
 ; SDISEL:       ; %bb.0:
 ; SDISEL-NEXT:    cmn w0, #1
-; SDISEL-NEXT:    mov w8, #-2
-; SDISEL-NEXT:    ccmp w1, w8, #0, eq
+; SDISEL-NEXT:    ccmn w1, #2, #0, eq
 ; SDISEL-NEXT:    cset w0, eq
 ; SDISEL-NEXT:    ret
 ;
@@ -1219,14 +1216,11 @@
   ret i1 %5
 }
 
-; FIXME: mov w8, #-2 + ccmp w1, w8, #4, ne
-; --> ccmn w1, #2, #4, ne
 define i1 @cmp_or_negative_const(i32 %a, i32 %b) {
 ; SDISEL-LABEL: cmp_or_negative_const:
 ; SDISEL:       ; %bb.0:
 ; SDISEL-NEXT:    cmn w0, #1
-; SDISEL-NEXT:    mov w8, #-2
-; SDISEL-NEXT:    ccmp w1, w8, #4, ne
+; SDISEL-NEXT:    ccmn w1, #2, #4, ne
 ; SDISEL-NEXT:    cset w0, eq
 ; SDISEL-NEXT:    ret
 ;