diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3318,13 +3318,15 @@
   return Cmp.getValue(1);
 }
 
-// Value is 1 if 'C' bit of NZCV is 1, else 0
-static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) {
+// Value is 1 if 'C' bit of NZCV is 1, else 0; Invert flips this.
+static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG,
+                                bool Invert) {
   assert(Flag.getResNo() == 1);
   SDLoc DL(Flag);
   SDValue Zero = DAG.getConstant(0, DL, VT);
   SDValue One = DAG.getConstant(1, DL, VT);
-  SDValue CC = DAG.getConstant(AArch64CC::HS, DL, MVT::i32);
+  unsigned Cond = Invert ? AArch64CC::LO : AArch64CC::HS;
+  SDValue CC = DAG.getConstant(Cond, DL, MVT::i32);
   return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
 }
 
@@ -3358,8 +3360,10 @@
   SDValue Sum = DAG.getNode(Opcode, DL, DAG.getVTList(VT0, MVT::Glue), OpLHS,
                             OpRHS, OpCarryIn);
 
-  SDValue OutFlag = IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG)
-                             : carryFlagToValue(Sum.getValue(1), VT1, DAG);
+  bool InvertCarry = Opcode == AArch64ISD::SBCS;
+  SDValue OutFlag =
+      IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG)
+               : carryFlagToValue(Sum.getValue(1), VT1, DAG, InvertCarry);
 
   return DAG.getNode(ISD::MERGE_VALUES, DL, VTs, Sum, OutFlag);
 }
diff --git a/llvm/test/CodeGen/AArch64/i128-math.ll b/llvm/test/CodeGen/AArch64/i128-math.ll
--- a/llvm/test/CodeGen/AArch64/i128-math.ll
+++ b/llvm/test/CodeGen/AArch64/i128-math.ll
@@ -92,7 +92,7 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x0, x0, x2
 ; CHECK-NEXT:    sbcs x1, x1, x3
-; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cset w8, lo
 ; CHECK-NEXT:    eor w2, w8, #0x1
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.usub.with.overflow.i128(i128 %x, i128 %y)
@@ -110,7 +110,7 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x0, x0, x2
 ; CHECK-NEXT:    sbcs x1, x1, x3
-; CHECK-NEXT:    cset w2, hs
+; CHECK-NEXT:    cset w2, lo
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.usub.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -126,7 +126,7 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x0, x2
 ; CHECK-NEXT:    sbcs x9, x1, x3
-; CHECK-NEXT:    cset w10, hs
+; CHECK-NEXT:    cset w10, lo
 ; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x0, xzr, x8, ne
 ; CHECK-NEXT:    csel x1, xzr, x9, ne
diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -346,13 +346,13 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x2, x6
 ; CHECK-NEXT:    sbcs x9, x3, x7
-; CHECK-NEXT:    cset w10, hs
+; CHECK-NEXT:    cset w10, lo
 ; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x2, xzr, x8, ne
 ; CHECK-NEXT:    csel x3, xzr, x9, ne
 ; CHECK-NEXT:    subs x8, x0, x4
 ; CHECK-NEXT:    sbcs x9, x1, x5
-; CHECK-NEXT:    cset w10, hs
+; CHECK-NEXT:    cset w10, lo
 ; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x8, xzr, x8, ne
 ; CHECK-NEXT:    csel x1, xzr, x9, ne
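
Why LO rather than HS: AArch64 SUBS/SBCS set the C flag to the inverse of the
borrow (C = 1 means no borrow occurred), so the overflow bit that
llvm.usub.with.overflow must return corresponds to C == 0, which is the LO
condition; HS (C == 1) remains correct for the ADCS path, where C is the
carry-out directly, which is why InvertCarry is keyed off
Opcode == AArch64ISD::SBCS. Below is a minimal standalone C++ sketch of that
flag semantics, not part of the patch; carryAfterSub is a made-up helper that
models the C flag:

  #include <cstdint>
  #include <cstdio>

  // Models the C flag AArch64 SUBS/SBCS leave behind for an unsigned
  // subtract: C = 1 when no borrow occurred (A >= B), C = 0 on borrow.
  static bool carryAfterSub(uint64_t A, uint64_t B) { return A >= B; }

  int main() {
    uint64_t A = 1, B = 2;
    uint64_t Diff = A - B;        // wraps to 0xffffffffffffffff
    bool C = carryAfterSub(A, B); // C == 0: the subtract borrowed

    bool CsetHS = C;  // cset wN, hs -> 0 here: would miss the overflow
    bool CsetLO = !C; // cset wN, lo -> 1 here: reports the overflow

    printf("diff=%llx hs=%d lo=%d\n", (unsigned long long)Diff, CsetHS,
           CsetLO);
    return 0;
  }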