diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -3166,6 +3166,11 @@
     SDValue Or = DAG.getNode(ISD::OR, dl, Lo.getValueType(), Lo, Hi);
     Ovf = DAG.getSetCC(dl, N->getValueType(1), Or,
                        DAG.getConstant(0, dl, Lo.getValueType()), ISD::SETEQ);
+  } else if (N->getOpcode() == ISD::UADDO && isAllOnesConstant(RHS)) {
+    // Special case: uaddo X, -1 overflows if X != 0.
+    Ovf =
+        DAG.getSetCC(dl, N->getValueType(1), LHS,
+                     DAG.getConstant(0, dl, LHS.getValueType()), ISD::SETNE);
   } else {
     // Calculate the overflow: addition overflows iff a + b < a, and
     // subtraction overflows iff a - b > a.
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9968,6 +9968,11 @@
     SetCC =
         DAG.getSetCC(dl, SetCCType, Result,
                      DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETEQ);
+  } else if (IsAdd && isAllOnesConstant(RHS)) {
+    // Special case: uaddo X, -1 overflows if X != 0.
+    SetCC =
+        DAG.getSetCC(dl, SetCCType, LHS,
+                     DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETNE);
   } else {
     ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
     SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -663,25 +663,22 @@
 define i1 @uaddo_i64_decrement_alt(i64 %x, ptr %p) {
 ; RV32-LABEL: uaddo_i64_decrement_alt:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a3, a0, -1
+; RV32-NEXT:    or a3, a0, a1
+; RV32-NEXT:    snez a3, a3
 ; RV32-NEXT:    seqz a4, a0
-; RV32-NEXT:    sub a4, a1, a4
-; RV32-NEXT:    bnez a0, .LBB18_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    sltu a0, a4, a1
-; RV32-NEXT:    j .LBB18_3
-; RV32-NEXT:  .LBB18_2:
-; RV32-NEXT:    sltu a0, a3, a0
-; RV32-NEXT:  .LBB18_3:
-; RV32-NEXT:    sw a3, 0(a2)
-; RV32-NEXT:    sw a4, 4(a2)
+; RV32-NEXT:    sub a1, a1, a4
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    sw a1, 4(a2)
+; RV32-NEXT:    mv a0, a3
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo_i64_decrement_alt:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a2, a0, -1
-; RV64-NEXT:    sltu a0, a2, a0
-; RV64-NEXT:    sd a2, 0(a1)
+; RV64-NEXT:    snez a2, a0
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    sd a0, 0(a1)
+; RV64-NEXT:    mv a0, a2
 ; RV64-NEXT:    ret
   %a = add i64 %x, -1
   store i64 %a, ptr %p
@@ -694,25 +691,22 @@
 define i1 @uaddo_i64_decrement_alt_dom(i64 %x, ptr %p) {
 ; RV32-LABEL: uaddo_i64_decrement_alt_dom:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a3, a0, -1
+; RV32-NEXT:    or a3, a0, a1
+; RV32-NEXT:    snez a3, a3
 ; RV32-NEXT:    seqz a4, a0
-; RV32-NEXT:    sub a4, a1, a4
-; RV32-NEXT:    bnez a0, .LBB19_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    sltu a0, a4, a1
-; RV32-NEXT:    j .LBB19_3
-; RV32-NEXT:  .LBB19_2:
-; RV32-NEXT:    sltu a0, a3, a0
-; RV32-NEXT:  .LBB19_3:
-; RV32-NEXT:    sw a3, 0(a2)
-; RV32-NEXT:    sw a4, 4(a2)
+; RV32-NEXT:    sub a1, a1, a4
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    sw a1, 4(a2)
+; RV32-NEXT:    mv a0, a3
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo_i64_decrement_alt_dom:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a2, a0, -1
-; RV64-NEXT:    sltu a0, a2, a0
-; RV64-NEXT:    sd a2, 0(a1)
+; RV64-NEXT:    snez a2, a0
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    sd a0, 0(a1)
+; RV64-NEXT:    mv a0, a2
 ; RV64-NEXT:    ret
   %ov = icmp ne i64 %x, 0
   %a = add i64 %x, -1
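
Note (not part of the patch): a minimal standalone sketch of the identity both new special cases rely on, namely that for an n-bit unsigned X, the addition X + (2^n - 1), i.e. uaddo X, -1, produces a carry exactly when X >= 1, that is, when X != 0. This is what justifies the SETNE against zero emitted above. The sketch assumes a GCC/Clang toolchain, using __builtin_add_overflow as a stand-in for ISD::UADDO.

// uaddo_minus1_check.cpp: sanity check that uaddo X, -1 overflows iff X != 0.
#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  for (uint64_t X : {UINT64_C(0), UINT64_C(1), UINT64_C(2),
                     UINT64_C(0x7fffffffffffffff),
                     UINT64_C(0xfffffffffffffffe),
                     UINT64_C(0xffffffffffffffff)}) {
    uint64_t Sum;
    // X + (uint64_t)-1; the return value is the unsigned carry-out.
    bool Ovf = __builtin_add_overflow(X, UINT64_MAX, &Sum);
    assert(Sum == X - 1);    // the sum half is simply X - 1 (mod 2^64)
    assert(Ovf == (X != 0)); // overflow bit matches the X != 0 setcc
  }
  return 0;
}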