diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -477,6 +477,11 @@
   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                          EVT VT) const override;
 
+  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
+                            bool MathUsed) const override {
+    return TargetLowering::shouldFormOverflowOp(Opcode, VT, false);
+  }
+
   bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
     return VT.isScalarInteger();
   }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8919,6 +8919,14 @@
   SDValue N1 = N->getOperand(1);
   EVT VT = N->getValueType(0);
   EVT OpVT = N0.getValueType();
+  SDLoc dl(N);
+  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
+  if (OpVT == MVT::i64 && !Subtarget.is64Bit() && Cond == ISD::SETULT &&
+      N0->getOpcode() == ISD::ADD && !isa<ConstantSDNode>(N1) &&
+      N0->getOperand(1) == N1) {
+    // icmp ult i64 (add a, b), b -> icmp ult i64 (add a, b), a
+    return DAG.getSetCC(dl, VT, N0, N0->getOperand(0), Cond);
+  }
 
   if (OpVT != MVT::i64 || !Subtarget.is64Bit())
     return SDValue();
@@ -8935,7 +8943,6 @@
     return SDValue();
 
   // Looking for an equality compare.
-  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
   if (!isIntEqualitySetCC(Cond))
     return SDValue();
 
@@ -8947,7 +8954,6 @@
 
   const APInt &C1 = N1C->getAPIntValue();
 
-  SDLoc dl(N);
   // If the constant is larger than 2^32 - 1 it is impossible for both sides
   // to be equal.
   if (C1.getActiveBits() > 32)
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -10,19 +10,16 @@
 define i64 @uaddo1_overflow_used(i64 %a, i64 %b) nounwind ssp {
 ; RV32-LABEL: uaddo1_overflow_used:
 ; RV32: # %bb.0:
-; RV32-NEXT: add a5, a3, a1
-; RV32-NEXT: add a4, a2, a0
-; RV32-NEXT: sltu a6, a4, a2
-; RV32-NEXT: add a5, a5, a6
-; RV32-NEXT: beq a5, a1, .LBB0_2
+; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: sltu a0, a0, a2
+; RV32-NEXT: add a1, a1, a0
+; RV32-NEXT: beq a1, a3, .LBB0_2
 ; RV32-NEXT: # %bb.1:
-; RV32-NEXT: sltu a0, a5, a1
-; RV32-NEXT: beqz a0, .LBB0_3
-; RV32-NEXT: j .LBB0_4
+; RV32-NEXT: sltu a0, a1, a3
 ; RV32-NEXT: .LBB0_2:
-; RV32-NEXT: sltu a0, a4, a0
 ; RV32-NEXT: bnez a0, .LBB0_4
-; RV32-NEXT: .LBB0_3:
+; RV32-NEXT: # %bb.3:
 ; RV32-NEXT: li a2, 42
 ; RV32-NEXT: .LBB0_4:
 ; RV32-NEXT: neg a1, a0
@@ -69,12 +66,12 @@
 ;
 ; RV64-LABEL: uaddo1_math_overflow_used:
 ; RV64: # %bb.0:
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: bltu a0, a1, .LBB1_2
+; RV64-NEXT: add a3, a1, a0
+; RV64-NEXT: bltu a3, a0, .LBB1_2
 ; RV64-NEXT: # %bb.1:
 ; RV64-NEXT: li a1, 42
 ; RV64-NEXT: .LBB1_2:
-; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: sd a3, 0(a2)
 ; RV64-NEXT: mv a0, a1
 ; RV64-NEXT: ret
   %add = add i64 %b, %a
@@ -200,7 +197,7 @@
 ; RV32-NEXT: add a0, a2, a0
 ; RV32-NEXT: sltu a1, a0, a2
 ; RV32-NEXT: add a5, a5, a1
-; RV32-NEXT: beq a5, a3, .LBB5_2
+; RV32-NEXT: beq a3, a5, .LBB5_2
 ; RV32-NEXT: # %bb.1:
 ; RV32-NEXT: sltu a1, a5, a3
 ; RV32-NEXT: .LBB5_2:
@@ -519,16 +516,13 @@
 define i1 @uaddo_i64_increment(i64 %x, ptr %p) {
 ; RV32-LABEL: uaddo_i64_increment:
 ; RV32: # %bb.0:
-; RV32-NEXT: mv a3, a0
-; RV32-NEXT: addi a4, a0, 1
-; RV32-NEXT: sltu a0, a4, a0
-; RV32-NEXT: add a5, a1, a0
-; RV32-NEXT: bgeu a4, a3, .LBB12_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: sltu a0, a5, a1
-; RV32-NEXT: .LBB12_2:
-; RV32-NEXT: sw a4, 0(a2)
-; RV32-NEXT: sw a5, 4(a2)
+; RV32-NEXT: addi a3, a0, 1
+; RV32-NEXT: sltu a0, a3, a0
+; RV32-NEXT: add a1, a1, a0
+; RV32-NEXT: or a0, a3, a1
+; RV32-NEXT: seqz a0, a0
+; RV32-NEXT: sw a3, 0(a2)
+; RV32-NEXT: sw a1, 4(a2)
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: uaddo_i64_increment:
@@ -546,21 +540,17 @@
 define i1 @uaddo_i8_increment_noncanonical_1(i8 %x, ptr %p) {
 ; RV32-LABEL: uaddo_i8_increment_noncanonical_1:
 ; RV32: # %bb.0:
-; RV32-NEXT: andi a0, a0, 255
 ; RV32-NEXT: addi a2, a0, 1
 ; RV32-NEXT: andi a0, a2, 255
-; RV32-NEXT: xor a0, a0, a2
-; RV32-NEXT: snez a0, a0
+; RV32-NEXT: seqz a0, a0
 ; RV32-NEXT: sb a2, 0(a1)
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: uaddo_i8_increment_noncanonical_1:
 ; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
-; RV64-NEXT: addi a2, a0, 1
+; RV64-NEXT: addiw a2, a0, 1
 ; RV64-NEXT: andi a0, a2, 255
-; RV64-NEXT: xor a0, a0, a2
-; RV64-NEXT: snez a0, a0
+; RV64-NEXT: seqz a0, a0
 ; RV64-NEXT: sb a2, 0(a1)
 ; RV64-NEXT: ret
   %a = add i8 1, %x ; commute
@@ -592,26 +582,20 @@
 define i1 @uaddo_i16_increment_noncanonical_3(i16 %x, ptr %p) {
 ; RV32-LABEL: uaddo_i16_increment_noncanonical_3:
 ; RV32: # %bb.0:
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: addi a3, a0, 1
-; RV32-NEXT: and a2, a3, a2
-; RV32-NEXT: xor a2, a2, a3
-; RV32-NEXT: snez a0, a2
-; RV32-NEXT: sh a3, 0(a1)
+; RV32-NEXT: addi a2, a0, 1
+; RV32-NEXT: slli a0, a2, 16
+; RV32-NEXT: srli a0, a0, 16
+; RV32-NEXT: seqz a0, a0
+; RV32-NEXT: sh a2, 0(a1)
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: uaddo_i16_increment_noncanonical_3:
 ; RV64: # %bb.0:
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -1
-; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: addi a3, a0, 1
-; RV64-NEXT: and a2, a3, a2
-; RV64-NEXT: xor a2, a2, a3
-; RV64-NEXT: snez a0, a2
-; RV64-NEXT: sh a3, 0(a1)
+; RV64-NEXT: addiw a2, a0, 1
+; RV64-NEXT: slli a0, a2, 48
+; RV64-NEXT: srli a0, a0, 48
+; RV64-NEXT: seqz a0, a0
+; RV64-NEXT: sh a2, 0(a1)
 ; RV64-NEXT: ret
   %a = add i16 1, %x ; commute
   %ov = icmp eq i16 0, %a ; commute
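
Note (not part of the patch): the RV32-only SETCC combine added above rewrites icmp ult i64 (add a, b), b into icmp ult i64 (add a, b), a, which lets the low-half sltu of the expanded 64-bit compare share the carry computation of the 64-bit add, as exercised by the updated uaddo1_overflow_used RV32 checks. Below is a minimal, hypothetical IR sketch of the input pattern; the function name @uaddo_i64_cmp_rhs is illustrative only and does not appear in overflow-intrinsics.ll.

; Illustrative sketch only: an unsigned add-overflow check written as a
; compare of the i64 sum against one addend, the shape the new combine matches.
define i1 @uaddo_i64_cmp_rhs(i64 %a, i64 %b) {
  %add = add i64 %a, %b       ; expanded on rv32 into add/sltu/add
  %ov = icmp ult i64 %add, %b ; rewritten by the combine to compare against %a
  ret i1 %ov
}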