diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3356,7 +3356,7 @@
 }
 
 // This lowering is inefficient, but it will get cleaned up by
-// `performAddSubCombine`
+// `foldOverflowCheck`
 static SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
   EVT VT0 = Op.getValue(0).getValueType();
   EVT VT1 = Op.getValue(1).getValueType();
@@ -15424,6 +15424,46 @@
   return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
 }
 
+static bool isCMP(SDValue Op) {
+  return Op.getOpcode() == AArch64ISD::SUBS &&
+         !Op.getNode()->hasAnyUseOfValue(0);
+}
+
+// (CSEL 1 0 CC Cond) => CC
+// (CSEL 0 1 CC Cond) => !CC
+static Optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) {
+  if (Op.getOpcode() != AArch64ISD::CSEL)
+    return None;
+  auto CC = static_cast<AArch64CC::CondCode>(Op.getConstantOperandVal(2));
+  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
+    return None;
+  SDValue OpLHS = Op.getOperand(0);
+  SDValue OpRHS = Op.getOperand(1);
+  if (isOneConstant(OpLHS) && isNullConstant(OpRHS))
+    return CC;
+  if (isNullConstant(OpLHS) && isOneConstant(OpRHS))
+    return getInvertedCondCode(CC);
+
+  return None;
+}
+
+// (ADC{S} l r (CMP (CSET HS carry) 1)) => (ADC{S} l r carry)
+// (SBC{S} l r (CMP (CSET LO carry) 1)) => (SBC{S} l r carry)
+static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) {
+  SDValue CmpOp = Op->getOperand(2);
+  if (!(isCMP(CmpOp) && isOneConstant(CmpOp.getOperand(1))))
+    return SDValue();
+
+  SDValue CsetOp = CmpOp->getOperand(0);
+  auto CC = getCSETCondCode(CsetOp);
+  if (CC != (IsAdd ? AArch64CC::HS : AArch64CC::LO))
+    return SDValue();
+
+  return DAG.getNode(Op->getOpcode(), SDLoc(Op), Op->getVTList(),
+                     Op->getOperand(0), Op->getOperand(1),
+                     CsetOp.getOperand(3));
+}
+
 static SDValue performAddSubCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     SelectionDAG &DAG) {
@@ -18631,6 +18671,12 @@
   case ISD::ADD:
   case ISD::SUB:
     return performAddSubCombine(N, DCI, DAG);
+  case AArch64ISD::ADC:
+  case AArch64ISD::ADCS:
+    return foldOverflowCheck(N, DAG, /* IsAdd */ true);
+  case AArch64ISD::SBC:
+  case AArch64ISD::SBCS:
+    return foldOverflowCheck(N, DAG, /* IsAdd */ false);
   case ISD::XOR:
     return performXorCombine(N, DAG, DCI, Subtarget);
   case ISD::MUL:
diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll
--- a/llvm/test/CodeGen/AArch64/adc.ll
+++ b/llvm/test/CodeGen/AArch64/adc.ll
@@ -6,24 +6,16 @@
 ; CHECK-LE-LABEL: test_simple:
 ; CHECK-LE: ; %bb.0:
 ; CHECK-LE-NEXT: adds x8, x0, x2
-; CHECK-LE-NEXT: cset w9, hs
-; CHECK-LE-NEXT: cmp w9, #1
 ; CHECK-LE-NEXT: adcs x9, x1, x3
 ; CHECK-LE-NEXT: subs x0, x8, x4
-; CHECK-LE-NEXT: cset w8, lo
-; CHECK-LE-NEXT: cmp w8, #1
 ; CHECK-LE-NEXT: sbcs x1, x9, x5
 ; CHECK-LE-NEXT: ret
 ;
 ; CHECK-BE-LABEL: test_simple:
 ; CHECK-BE: // %bb.0:
 ; CHECK-BE-NEXT: adds x8, x1, x3
-; CHECK-BE-NEXT: cset w9, hs
-; CHECK-BE-NEXT: cmp w9, #1
 ; CHECK-BE-NEXT: adcs x9, x0, x2
 ; CHECK-BE-NEXT: subs x1, x8, x5
-; CHECK-BE-NEXT: cset w8, lo
-; CHECK-BE-NEXT: cmp w8, #1
 ; CHECK-BE-NEXT: sbcs x0, x9, x4
 ; CHECK-BE-NEXT: ret
@@ -38,16 +30,12 @@
 ; CHECK-LE-LABEL: test_imm:
 ; CHECK-LE: ; %bb.0:
 ; CHECK-LE-NEXT: adds x0, x0, #12
-; CHECK-LE-NEXT: cset w8, hs
-; CHECK-LE-NEXT: cmp w8, #1
 ; CHECK-LE-NEXT: adcs x1, x1, xzr
 ; CHECK-LE-NEXT: ret
 ;
 ; CHECK-BE-LABEL: test_imm:
 ; CHECK-BE: // %bb.0:
 ; CHECK-BE-NEXT: adds x1, x1, #12
-; CHECK-BE-NEXT: cset w8, hs
-; CHECK-BE-NEXT: cmp w8, #1
 ; CHECK-BE-NEXT: adcs x0, x0, xzr
 ; CHECK-BE-NEXT: ret
@@ -59,19 +47,15 @@
 define i128 @test_shifted(i128 %a, i128 %b) {
 ; CHECK-LE-LABEL: test_shifted:
 ; CHECK-LE: ; %bb.0:
-; CHECK-LE-NEXT: adds x0, x0, x2, lsl #45
 ; CHECK-LE-NEXT: extr x8, x3, x2, #19
-; CHECK-LE-NEXT: cset w9, hs
-; CHECK-LE-NEXT: cmp w9, #1
+; CHECK-LE-NEXT: adds x0, x0, x2, lsl #45
 ; CHECK-LE-NEXT: adcs x1, x1, x8
 ; CHECK-LE-NEXT: ret
 ;
 ; CHECK-BE-LABEL: test_shifted:
 ; CHECK-BE: // %bb.0:
-; CHECK-BE-NEXT: adds x1, x1, x3, lsl #45
 ; CHECK-BE-NEXT: extr x8, x2, x3, #19
-; CHECK-BE-NEXT: cset w9, hs
-; CHECK-BE-NEXT: cmp w9, #1
+; CHECK-BE-NEXT: adds x1, x1, x3, lsl #45
 ; CHECK-BE-NEXT: adcs x0, x0, x8
 ; CHECK-BE-NEXT: ret
@@ -90,8 +74,6 @@
 ; CHECK-LE-NEXT: adds x0, x0, w2, sxth #3
 ; CHECK-LE-NEXT: asr x9, x8, #63
 ; CHECK-LE-NEXT: extr x8, x9, x8, #61
-; CHECK-LE-NEXT: cset w9, hs
-; CHECK-LE-NEXT: cmp w9, #1
 ; CHECK-LE-NEXT: adcs x1, x1, x8
 ; CHECK-LE-NEXT: ret
 ;
@@ -102,8 +84,6 @@
 ; CHECK-BE-NEXT: adds x1, x1, w2, sxth #3
 ; CHECK-BE-NEXT: asr x9, x8, #63
 ; CHECK-BE-NEXT: extr x8, x9, x8, #61
-; CHECK-BE-NEXT: cset w9, hs
-; CHECK-BE-NEXT: cmp w9, #1
 ; CHECK-BE-NEXT: adcs x0, x0, x8
 ; CHECK-BE-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
--- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll
+++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
@@ -9,8 +9,6 @@
 ; CHECK-NEXT: lsr x9, x1, #32
 ; CHECK-NEXT: cmn x3, x2
 ; CHECK-NEXT: mul x8, x8, x9
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: adcs x0, x8, xzr
 ; CHECK-NEXT: ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
--- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -260,8 +260,6 @@
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: ldaxp x9, x8, [x0]
 ; CHECK-NEXT: adds x10, x9, x2
-; CHECK-NEXT: cset w11, hs
-; CHECK-NEXT: cmp w11, #1
 ; CHECK-NEXT: adcs x11, x8, x3
 ; CHECK-NEXT: stlxp w12, x10, x11, [x0]
 ; CHECK-NEXT: cbnz w12, .LBB6_1
@@ -283,8 +281,6 @@
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: ldaxp x9, x8, [x0]
 ; CHECK-NEXT: subs x10, x9, x2
-; CHECK-NEXT: cset w11, lo
-; CHECK-NEXT: cmp w11, #1
 ; CHECK-NEXT: sbcs x11, x8, x3
 ; CHECK-NEXT: stlxp w12, x10, x11, [x0]
 ; CHECK-NEXT: cbnz w12, .LBB7_1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1749,35 +1749,27 @@
 ; CHECK-LABEL: uabd_i64:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: fmov x9, d0
-; CHECK-NEXT: fmov x10, d1
+; CHECK-NEXT: fmov x11, d1
 ; CHECK-NEXT: mov.d x8, v0[1]
-; CHECK-NEXT: mov.d x11, v1[1]
-; CHECK-NEXT: subs x12, x9, x10
-; CHECK-NEXT: asr x9, x9, #63
-; CHECK-NEXT: cset w13, lo
-; CHECK-NEXT: asr x10, x10, #63
-; CHECK-NEXT: cmp w13, #1
-; CHECK-NEXT: sbcs x9, x9, x10
-; CHECK-NEXT: subs x10, x8, x11
-; CHECK-NEXT: asr x8, x8, #63
-; CHECK-NEXT: cset w13, lo
-; CHECK-NEXT: asr x11, x11, #63
-; CHECK-NEXT: cmp w13, #1
-; CHECK-NEXT: sbcs x8, x8, x11
-; CHECK-NEXT: asr x11, x8, #63
-; CHECK-NEXT: eor x10, x10, x11
-; CHECK-NEXT: eor x8, x8, x11
-; CHECK-NEXT: subs x2, x10, x11
-; CHECK-NEXT: asr x10, x9, #63
-; CHECK-NEXT: cset w13, lo
-; CHECK-NEXT: eor x12, x12, x10
-; CHECK-NEXT: cmp w13, #1
-; CHECK-NEXT: eor x9, x9, x10
-; CHECK-NEXT: sbcs x3, x8, x11
-; CHECK-NEXT: subs x8, x12, x10
-; CHECK-NEXT: cset w11, lo
-; CHECK-NEXT: cmp w11, #1
-; CHECK-NEXT: sbcs x1, x9, x10
+; CHECK-NEXT: mov.d x10, v1[1]
+; CHECK-NEXT: asr x12, x9, #63
+; CHECK-NEXT: asr x13, x11, #63
+; CHECK-NEXT: subs x9, x9, x11
+; CHECK-NEXT: sbcs x11, x12, x13
+; CHECK-NEXT: asr x12, x8, #63
+; CHECK-NEXT: asr x13, x10, #63
+; CHECK-NEXT: subs x8, x8, x10
+; CHECK-NEXT: sbcs x10, x12, x13
+; CHECK-NEXT: asr x13, x11, #63
+; CHECK-NEXT: asr x12, x10, #63
+; CHECK-NEXT: eor x8, x8, x12
+; CHECK-NEXT: eor x10, x10, x12
+; CHECK-NEXT: subs x2, x8, x12
+; CHECK-NEXT: eor x8, x9, x13
+; CHECK-NEXT: sbcs x3, x10, x12
+; CHECK-NEXT: eor x9, x11, x13
+; CHECK-NEXT: subs x8, x8, x13
+; CHECK-NEXT: sbcs x1, x9, x13
 ; CHECK-NEXT: fmov d0, x8
 ; CHECK-NEXT: mov.d v0[1], x1
 ; CHECK-NEXT: fmov x0, d0
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
--- a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
@@ -219,8 +219,6 @@
 ; NOLSE-NEXT: ldr x8, [sp, #32] // 8-byte Folded Reload
 ; NOLSE-NEXT: ldr x13, [sp, #24] // 8-byte Folded Reload
 ; NOLSE-NEXT: adds x14, x8, #1
-; NOLSE-NEXT: cset w9, hs
-; NOLSE-NEXT: subs w9, w9, #1
 ; NOLSE-NEXT: mov x9, xzr
 ; NOLSE-NEXT: adcs x15, x11, x9
 ; NOLSE-NEXT: .LBB4_2: // %atomicrmw.start
@@ -274,8 +272,6 @@
 ; LSE-NEXT: ldr x8, [sp, #64] // 8-byte Folded Reload
 ; LSE-NEXT: ldr x9, [sp, #56] // 8-byte Folded Reload
 ; LSE-NEXT: adds x2, x8, #1
-; LSE-NEXT: cset w11, hs
-; LSE-NEXT: subs w11, w11, #1
 ; LSE-NEXT: mov x11, xzr
 ; LSE-NEXT: adcs x11, x10, x11
 ; LSE-NEXT: // kill: def $x2 killed $x2 def $x2_x3
diff --git a/llvm/test/CodeGen/AArch64/i128-math.ll b/llvm/test/CodeGen/AArch64/i128-math.ll
--- a/llvm/test/CodeGen/AArch64/i128-math.ll
+++ b/llvm/test/CodeGen/AArch64/i128-math.ll
@@ -23,8 +23,6 @@
 ; CHECK-LABEL: u128_add:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x0, x0, x2
-; CHECK-NEXT: cset w8, hs
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: adcs x1, x1, x3
 ; CHECK-NEXT: ret
   %1 = add i128 %x, %y
@@ -35,8 +33,6 @@
 ; CHECK-LABEL: u128_checked_add:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x0, x0, x2
-; CHECK-NEXT: cset w8, hs
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: adcs x1, x1, x3
 ; CHECK-NEXT: cset w8, hs
 ; CHECK-NEXT: eor w2, w8, #0x1
@@ -55,8 +51,6 @@
 ; CHECK-LABEL: u128_overflowing_add:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x0, x0, x2
-; CHECK-NEXT: cset w8, hs
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: adcs x1, x1, x3
 ; CHECK-NEXT: cset w2, hs
 ; CHECK-NEXT: ret
@@ -73,8 +67,6 @@
 ; CHECK-LABEL: u128_saturating_add:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x8, x0, x2
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: adcs x9, x1, x3
 ; CHECK-NEXT: cset w10, hs
 ; CHECK-NEXT: cmp w10, #0
@@ -89,8 +81,6 @@
 ; CHECK-LABEL: u128_sub:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x0, x0, x2
-; CHECK-NEXT: cset w8, lo
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: sbcs x1, x1, x3
 ; CHECK-NEXT: ret
   %1 = sub i128 %x, %y
@@ -101,8 +91,6 @@
 ; CHECK-LABEL: u128_checked_sub:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x0, x0, x2
-; CHECK-NEXT: cset w8, lo
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: sbcs x1, x1, x3
 ; CHECK-NEXT: cset w8, hs
 ; CHECK-NEXT: eor w2, w8, #0x1
@@ -121,8 +109,6 @@
 ; CHECK-LABEL: u128_overflowing_sub:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x0, x0, x2
-; CHECK-NEXT: cset w8, lo
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: sbcs x1, x1, x3
 ; CHECK-NEXT: cset w2, hs
 ; CHECK-NEXT: ret
@@ -139,8 +125,6 @@
 ; CHECK-LABEL: u128_saturating_sub:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x8, x0, x2
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: sbcs x9, x1, x3
 ; CHECK-NEXT: cset w10, hs
 ; CHECK-NEXT: cmp w10, #0
@@ -155,8 +139,6 @@
 ; CHECK-LABEL: i128_add:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x0, x0, x2
-; CHECK-NEXT: cset w8, hs
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: adcs x1, x1, x3
 ; CHECK-NEXT: ret
   %1 = add i128 %x, %y
@@ -167,8 +149,6 @@
 ; CHECK-LABEL: i128_checked_add:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x0, x0, x2
-; CHECK-NEXT: cset w8, hs
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: adcs x1, x1, x3
 ; CHECK-NEXT: cset w8, vs
 ; CHECK-NEXT: eor w2, w8, #0x1
@@ -187,8 +167,6 @@
 ; CHECK-LABEL: i128_overflowing_add:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x0, x0, x2
-; CHECK-NEXT: cset w8, hs
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: adcs x1, x1, x3
 ; CHECK-NEXT: cset w2, vs
 ; CHECK-NEXT: ret
@@ -205,8 +183,6 @@
 ; CHECK-LABEL: i128_saturating_add:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x8, x0, x2
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: adcs x9, x1, x3
 ; CHECK-NEXT: asr x10, x9, #63
 ; CHECK-NEXT: cset w11, vs
@@ -223,8 +199,6 @@
 ; CHECK-LABEL: i128_sub:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x0, x0, x2
-; CHECK-NEXT: cset w8, lo
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: sbcs x1, x1, x3
 ; CHECK-NEXT: ret
   %1 = sub i128 %x, %y
@@ -235,8 +209,6 @@
 ; CHECK-LABEL: i128_checked_sub:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x0, x0, x2
-; CHECK-NEXT: cset w8, lo
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: sbcs x1, x1, x3
 ; CHECK-NEXT: cset w8, vs
 ; CHECK-NEXT: eor w2, w8, #0x1
@@ -255,8 +227,6 @@
 ; CHECK-LABEL: i128_overflowing_sub:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x0, x0, x2
-; CHECK-NEXT: cset w8, lo
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: sbcs x1, x1, x3
 ; CHECK-NEXT: cset w2, vs
 ; CHECK-NEXT: ret
@@ -273,8 +243,6 @@
 ; CHECK-LABEL: i128_saturating_sub:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x8, x0, x2
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: sbcs x9, x1, x3
 ; CHECK-NEXT: asr x10, x9, #63
 ; CHECK-NEXT: cset w11, vs
diff --git a/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll b/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
--- a/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
+++ b/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
@@ -11,8 +11,6 @@
 ; CHECK-NEXT: .LBB0_1: // %loop
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: adds x0, x0, #1
-; CHECK-NEXT: cset w8, hs
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: adcs x1, x1, xzr
 ; CHECK-NEXT: orr x8, x1, x0, lsr #60
 ; CHECK-NEXT: cbnz x8, .LBB0_1
diff --git a/llvm/test/CodeGen/AArch64/neg-abs.ll b/llvm/test/CodeGen/AArch64/neg-abs.ll
--- a/llvm/test/CodeGen/AArch64/neg-abs.ll
+++ b/llvm/test/CodeGen/AArch64/neg-abs.ll
@@ -52,8 +52,6 @@
 ; CHECK-NEXT: eor x9, x0, x8
 ; CHECK-NEXT: eor x10, x1, x8
 ; CHECK-NEXT: subs x0, x8, x9
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: sbcs x1, x8, x10
 ; CHECK-NEXT: ret
   %abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
@@ -101,8 +99,6 @@
 ; CHECK-NEXT: eor x9, x0, x8
 ; CHECK-NEXT: eor x10, x1, x8
 ; CHECK-NEXT: subs x0, x9, x8
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: sbcs x1, x10, x8
 ; CHECK-NEXT: ret
   %abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
diff --git a/llvm/test/CodeGen/AArch64/neon-abd.ll b/llvm/test/CodeGen/AArch64/neon-abd.ll
--- a/llvm/test/CodeGen/AArch64/neon-abd.ll
+++ b/llvm/test/CodeGen/AArch64/neon-abd.ll
@@ -147,24 +147,20 @@
 ; CHECK-NEXT: mov x8, v0.d[1]
 ; CHECK-NEXT: fmov x10, d0
 ; CHECK-NEXT: mov x9, v1.d[1]
-; CHECK-NEXT: subs x11, x8, x9
-; CHECK-NEXT: asr x8, x8, #63
-; CHECK-NEXT: cset w12, lo
-; CHECK-NEXT: asr x9, x9, #63
-; CHECK-NEXT: cmp w12, #1
-; CHECK-NEXT: fmov x12, d1
-; CHECK-NEXT: sbcs x8, x8, x9
-; CHECK-NEXT: asr x8, x8, #63
-; CHECK-NEXT: subs x9, x10, x12
-; CHECK-NEXT: asr x10, x10, #63
-; CHECK-NEXT: cset w13, lo
-; CHECK-NEXT: asr x12, x12, #63
-; CHECK-NEXT: cmp w13, #1
-; CHECK-NEXT: eor x11, x11, x8
-; CHECK-NEXT: sbcs x10, x10, x12
-; CHECK-NEXT: sub x8, x11, x8
+; CHECK-NEXT: asr x11, x10, #63
+; CHECK-NEXT: asr x12, x8, #63
+; CHECK-NEXT: asr x13, x9, #63
+; CHECK-NEXT: subs x8, x8, x9
+; CHECK-NEXT: fmov x9, d1
+; CHECK-NEXT: sbcs x12, x12, x13
+; CHECK-NEXT: asr x13, x9, #63
+; CHECK-NEXT: subs x9, x10, x9
+; CHECK-NEXT: sbcs x10, x11, x13
+; CHECK-NEXT: asr x11, x12, #63
 ; CHECK-NEXT: asr x10, x10, #63
+; CHECK-NEXT: eor x8, x8, x11
 ; CHECK-NEXT: eor x9, x9, x10
+; CHECK-NEXT: sub x8, x8, x11
 ; CHECK-NEXT: sub x9, x9, x10
 ; CHECK-NEXT: fmov d1, x8
 ; CHECK-NEXT: fmov d0, x9
@@ -332,21 +328,17 @@
 ; CHECK-NEXT: fmov x10, d0
 ; CHECK-NEXT: mov x9, v1.d[1]
 ; CHECK-NEXT: subs x8, x8, x9
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: fmov x9, d1
 ; CHECK-NEXT: ngcs x11, xzr
 ; CHECK-NEXT: asr x11, x11, #63
 ; CHECK-NEXT: subs x9, x10, x9
 ; CHECK-NEXT: eor x8, x8, x11
-; CHECK-NEXT: cset w10, lo
-; CHECK-NEXT: sub x8, x8, x11
-; CHECK-NEXT: cmp w10, #1
 ; CHECK-NEXT: ngcs x10, xzr
+; CHECK-NEXT: sub x8, x8, x11
 ; CHECK-NEXT: asr x10, x10, #63
-; CHECK-NEXT: fmov d1, x8
 ; CHECK-NEXT: eor x9, x9, x10
 ; CHECK-NEXT: sub x9, x9, x10
+; CHECK-NEXT: fmov d1, x8
 ; CHECK-NEXT: fmov d0, x9
 ; CHECK-NEXT: mov v0.d[1], v1.d[0]
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/nzcv-save.ll b/llvm/test/CodeGen/AArch64/nzcv-save.ll
--- a/llvm/test/CodeGen/AArch64/nzcv-save.ll
+++ b/llvm/test/CodeGen/AArch64/nzcv-save.ll
@@ -6,27 +6,22 @@
 define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256* nocapture %dd) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: f:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldp x8, x10, [x2]
-; CHECK-NEXT: ldp x9, x11, [x3]
-; CHECK-NEXT: ldp x12, x13, [x2, #16]
-; CHECK-NEXT: adds x8, x8, x9
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
-; CHECK-NEXT: adcs x10, x10, x11
-; CHECK-NEXT: orr x14, x13, #0x100
-; CHECK-NEXT: cset w11, hs
-; CHECK-NEXT: cmp w11, #1
-; CHECK-NEXT: ldp x9, x11, [x3, #16]
-; CHECK-NEXT: stp x8, x10, [x0]
-; CHECK-NEXT: adcs x9, x12, x9
-; CHECK-NEXT: cset w12, hs
-; CHECK-NEXT: cmp w12, #1
-; CHECK-NEXT: adcs x13, x13, x11
-; CHECK-NEXT: cmp w12, #1
-; CHECK-NEXT: adcs x11, x14, x11
-; CHECK-NEXT: stp x9, x13, [x0, #16]
-; CHECK-NEXT: stp x8, x10, [x1]
-; CHECK-NEXT: stp x9, x11, [x1, #16]
+; CHECK-NEXT: ldp x9, x8, [x2]
+; CHECK-NEXT: ldp x11, x10, [x3]
+; CHECK-NEXT: adds x9, x9, x11
+; CHECK-NEXT: ldp x12, x11, [x2, #16]
+; CHECK-NEXT: adcs x8, x8, x10
+; CHECK-NEXT: ldp x13, x10, [x3, #16]
+; CHECK-NEXT: adcs x12, x12, x13
+; CHECK-NEXT: mrs x13, NZCV
+; CHECK-NEXT: adcs x14, x11, x10
+; CHECK-NEXT: orr x11, x11, #0x100
+; CHECK-NEXT: msr NZCV, x13
+; CHECK-NEXT: stp x9, x8, [x0]
+; CHECK-NEXT: adcs x10, x11, x10
+; CHECK-NEXT: stp x12, x14, [x0, #16]
+; CHECK-NEXT: stp x9, x8, [x1]
+; CHECK-NEXT: stp x12, x10, [x1, #16]
 ; CHECK-NEXT: ret
 entry:
   %c = load i256, i256* %cc
diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -351,8 +351,6 @@
 ; CHECK-LABEL: v2i128:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x8, x2, x6
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: adcs x9, x3, x7
 ; CHECK-NEXT: cset w10, vs
 ; CHECK-NEXT: asr x11, x9, #63
@@ -361,8 +359,6 @@
 ; CHECK-NEXT: eor x8, x11, #0x8000000000000000
 ; CHECK-NEXT: csel x3, x8, x9, ne
 ; CHECK-NEXT: adds x8, x0, x4
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: adcs x9, x1, x5
 ; CHECK-NEXT: cset w10, vs
 ; CHECK-NEXT: asr x11, x9, #63
diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -354,8 +354,6 @@
 ; CHECK-LABEL: v2i128:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x8, x2, x6
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: sbcs x9, x3, x7
 ; CHECK-NEXT: cset w10, vs
 ; CHECK-NEXT: asr x11, x9, #63
@@ -364,8 +362,6 @@
 ; CHECK-NEXT: eor x8, x11, #0x8000000000000000
 ; CHECK-NEXT: csel x3, x8, x9, ne
 ; CHECK-NEXT: subs x8, x0, x4
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: sbcs x9, x1, x5
 ; CHECK-NEXT: cset w10, vs
 ; CHECK-NEXT: asr x11, x9, #63
diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -349,16 +349,12 @@
 ; CHECK-LABEL: v2i128:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x8, x2, x6
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: adcs x9, x3, x7
 ; CHECK-NEXT: cset w10, hs
 ; CHECK-NEXT: cmp w10, #0
 ; CHECK-NEXT: csinv x2, x8, xzr, eq
 ; CHECK-NEXT: csinv x3, x9, xzr, eq
 ; CHECK-NEXT: adds x8, x0, x4
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: adcs x9, x1, x5
 ; CHECK-NEXT: cset w10, hs
 ; CHECK-NEXT: cmp w10, #0
diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -345,16 +345,12 @@
 ; CHECK-LABEL: v2i128:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: subs x8, x2, x6
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: sbcs x9, x3, x7
 ; CHECK-NEXT: cset w10, hs
 ; CHECK-NEXT: cmp w10, #0
 ; CHECK-NEXT: csel x2, xzr, x8, ne
 ; CHECK-NEXT: csel x3, xzr, x9, ne
 ; CHECK-NEXT: subs x8, x0, x4
-; CHECK-NEXT: cset w9, lo
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: sbcs x9, x1, x5
 ; CHECK-NEXT: cset w10, hs
 ; CHECK-NEXT: cmp w10, #0
diff --git a/llvm/test/CodeGen/AArch64/vec_uaddo.ll b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
--- a/llvm/test/CodeGen/AArch64/vec_uaddo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
@@ -277,13 +277,9 @@
 ; CHECK-LABEL: uaddo_v2i128:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x8, x2, x6
-; CHECK-NEXT: cset w9, hs
-; CHECK-NEXT: cmp w9, #1
 ; CHECK-NEXT: adcs x9, x3, x7
 ; CHECK-NEXT: cset w10, hs
 ; CHECK-NEXT: adds x11, x0, x4
-; CHECK-NEXT: cset w12, hs
-; CHECK-NEXT: cmp w12, #1
 ; CHECK-NEXT: adcs x12, x1, x5
 ; CHECK-NEXT: cset w13, hs
 ; CHECK-NEXT: fmov s0, w13
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll
--- a/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll
@@ -149,8 +149,6 @@
 ; CHECK-LABEL: test_v2i128:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adds x0, x0, x2
-; CHECK-NEXT: cset w8, hs
-; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: adcs x1, x1, x3
 ; CHECK-NEXT: ret
   %b = call i128 @llvm.vector.reduce.add.v2i128(<2 x i128> %a)
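
Note for reviewers: below is a minimal standalone sketch of a reproducer for the new fold, distilled from the u128_add case in llvm/test/CodeGen/AArch64/i128-math.ll above. The RUN line is an assumption for illustration; the in-tree test may use different flags.

; RUN: llc -mtriple=aarch64 < %s | FileCheck %s

; Before this patch, the lowering materialized the carry of the low-half
; ADDS with CSET and re-tested it with CMP before the high-half ADCS:
;   adds x0, x0, x2
;   cset w8, hs
;   cmp  w8, #1
;   adcs x1, x1, x3
; foldOverflowCheck matches (ADCS l r (CMP (CSET HS carry) 1)) and feeds
; the carry flag into ADCS directly:
; CHECK-LABEL: u128_add:
; CHECK:      adds x0, x0, x2
; CHECK-NEXT: adcs x1, x1, x3
; CHECK-NEXT: ret
define i128 @u128_add(i128 %x, i128 %y) {
  %1 = add i128 %x, %y
  ret i128 %1
}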