diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -487,6 +487,14 @@
   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                          EVT VT) const override;
 
+  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
+                            bool MathUsed) const override {
+    if (VT == MVT::i8 || VT == MVT::i16)
+      return false;
+
+    return TargetLowering::shouldFormOverflowOp(Opcode, VT, MathUsed);
+  }
+
   bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
     return VT.isScalarInteger();
   }
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -543,21 +543,17 @@
 define i1 @uaddo_i8_increment_noncanonical_1(i8 %x, ptr %p) {
 ; RV32-LABEL: uaddo_i8_increment_noncanonical_1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    andi a0, a0, 255
 ; RV32-NEXT:    addi a2, a0, 1
 ; RV32-NEXT:    andi a0, a2, 255
-; RV32-NEXT:    xor a0, a0, a2
-; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    seqz a0, a0
 ; RV32-NEXT:    sb a2, 0(a1)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo_i8_increment_noncanonical_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    andi a0, a0, 255
-; RV64-NEXT:    addi a2, a0, 1
+; RV64-NEXT:    addiw a2, a0, 1
 ; RV64-NEXT:    andi a0, a2, 255
-; RV64-NEXT:    xor a0, a0, a2
-; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    seqz a0, a0
 ; RV64-NEXT:    sb a2, 0(a1)
 ; RV64-NEXT:    ret
   %a = add i8 1, %x ; commute
@@ -589,26 +585,20 @@
 define i1 @uaddo_i16_increment_noncanonical_3(i16 %x, ptr %p) {
 ; RV32-LABEL: uaddo_i16_increment_noncanonical_3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    addi a3, a0, 1
-; RV32-NEXT:    and a2, a3, a2
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    snez a0, a2
-; RV32-NEXT:    sh a3, 0(a1)
+; RV32-NEXT:    addi a2, a0, 1
+; RV32-NEXT:    slli a0, a2, 16
+; RV32-NEXT:    srli a0, a0, 16
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    sh a2, 0(a1)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo_i16_increment_noncanonical_3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    addi a3, a0, 1
-; RV64-NEXT:    and a2, a3, a2
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    snez a0, a2
-; RV64-NEXT:    sh a3, 0(a1)
+; RV64-NEXT:    addiw a2, a0, 1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    srli a0, a0, 48
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    sh a2, 0(a1)
 ; RV64-NEXT:    ret
   %a = add i16 1, %x ; commute
   %ov = icmp eq i16 0, %a ; commute
@@ -1281,16 +1271,15 @@
   ret void
 }
 
-define i16 @overflow_not_used(i16 %a, i16 %b, ptr %res) nounwind ssp {
+define i16 @overflow_not_used(i16 %a, i16 %b, ptr %res) {
 ; RV32-LABEL: overflow_not_used:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a3, 16
 ; RV32-NEXT:    addi a3, a3, -1
-; RV32-NEXT:    and a0, a0, a3
 ; RV32-NEXT:    and a4, a1, a3
-; RV32-NEXT:    add a0, a4, a0
+; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    and a3, a0, a3
-; RV32-NEXT:    bne a3, a0, .LBB37_2
+; RV32-NEXT:    bltu a3, a4, .LBB37_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 42
 ; RV32-NEXT:  .LBB37_2:
@@ -1302,11 +1291,10 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a3, 16
 ; RV64-NEXT:    addiw a3, a3, -1
-; RV64-NEXT:    and a0, a0, a3
 ; RV64-NEXT:    and a4, a1, a3
-; RV64-NEXT:    add a0, a4, a0
+; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    and a3, a0, a3
-; RV64-NEXT:    bne a3, a0, .LBB37_2
+; RV64-NEXT:    bltu a3, a4, .LBB37_2
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    li a1, 42
 ; RV64-NEXT:  .LBB37_2: