diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2614,6 +2614,11 @@
       return true;
     }
 
+    // neg x with only low bit demanded is simply x.
+    if (Op.getOpcode() == ISD::SUB && DemandedBits.isOne() &&
+        isa<ConstantSDNode>(Op0) && cast<ConstantSDNode>(Op0)->isZero())
+      return TLO.CombineTo(Op, Op1);
+
     // Attempt to avoid multi-use ops if we don't need anything from them.
     if (!LoMask.isAllOnes() || !DemandedElts.isAllOnes()) {
       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
diff --git a/llvm/test/CodeGen/RISCV/alu64.ll b/llvm/test/CodeGen/RISCV/alu64.ll
--- a/llvm/test/CodeGen/RISCV/alu64.ll
+++ b/llvm/test/CodeGen/RISCV/alu64.ll
@@ -59,8 +59,7 @@
 ; RV32I-LABEL: sltiu:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: sltiu a0, a0, 3
-; RV32I-NEXT: snez a1, a1
-; RV32I-NEXT: addi a1, a1, -1
+; RV32I-NEXT: seqz a1, a1
 ; RV32I-NEXT: and a0, a1, a0
 ; RV32I-NEXT: li a1, 0
 ; RV32I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
--- a/llvm/test/CodeGen/RISCV/bittest.ll
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -301,9 +301,7 @@
 ; RV32-NEXT: srl a1, a1, a0
 ; RV32-NEXT: addi a0, a0, -32
 ; RV32-NEXT: slti a0, a0, 0
-; RV32-NEXT: neg a0, a0
 ; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: andi a0, a0, 1
 ; RV32-NEXT: ret
 ;
 ; RV64I-LABEL: bittest_constant_by_var_shr_i64:
@@ -335,9 +333,7 @@
 ; RV32-NEXT: srl a1, a1, a0
 ; RV32-NEXT: addi a0, a0, -32
 ; RV32-NEXT: slti a0, a0, 0
-; RV32-NEXT: neg a0, a0
 ; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: andi a0, a0, 1
 ; RV32-NEXT: ret
 ;
 ; RV64I-LABEL: bittest_constant_by_var_shl_i64:
diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -2806,8 +2806,7 @@
 ; RV32-NEXT: .LBB52_2: # %atomicrmw.start
 ; RV32-NEXT: # =>This Inner Loop Header: Depth=1
 ; RV32-NEXT: sltiu a0, a4, 2
-; RV32-NEXT: snez a2, a1
-; RV32-NEXT: addi a2, a2, -1
+; RV32-NEXT: seqz a2, a1
 ; RV32-NEXT: and a0, a2, a0
 ; RV32-NEXT: mv a2, a4
 ; RV32-NEXT: bnez a0, .LBB52_1
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -115,8 +115,7 @@
 ; RV32IF-NEXT: .cfi_offset ra, -4
 ; RV32IF-NEXT: call __fixunsdfdi@plt
 ; RV32IF-NEXT: sltiu a2, a0, -1
-; RV32IF-NEXT: snez a1, a1
-; RV32IF-NEXT: addi a1, a1, -1
+; RV32IF-NEXT: seqz a1, a1
 ; RV32IF-NEXT: and a1, a1, a2
 ; RV32IF-NEXT: addi a1, a1, -1
 ; RV32IF-NEXT: or a0, a1, a0
@@ -434,8 +433,7 @@
 ; RV32-NEXT: call __extendhfsf2@plt
 ; RV32-NEXT: call __fixunssfdi@plt
 ; RV32-NEXT: sltiu a2, a0, -1
-; RV32-NEXT: snez a1, a1
-; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: seqz a1, a1
 ; RV32-NEXT: and a1, a1, a2
 ; RV32-NEXT: addi a1, a1, -1
 ; RV32-NEXT: or a0, a1, a0
@@ -1241,10 +1239,8 @@
 ; RV32IF-NEXT: lw a1, 20(sp)
 ; RV32IF-NEXT: lw a2, 12(sp)
 ; RV32IF-NEXT: lw a3, 8(sp)
-; RV32IF-NEXT: seqz a4, a0
-; RV32IF-NEXT: snez a5, a1
-; RV32IF-NEXT: addi a5, a5, -1
-; RV32IF-NEXT: and a4, a5, a4
+; RV32IF-NEXT: or a4, a1, a0
+; RV32IF-NEXT: seqz a4, a4
 ; RV32IF-NEXT: xori a0, a0, 1
 ; RV32IF-NEXT: or a0, a0, a1
 ; RV32IF-NEXT: seqz a0, a0
@@ -1283,10 +1279,8 @@
 ; RV32IFD-NEXT: lw a1, 20(sp)
 ; RV32IFD-NEXT: lw a2, 12(sp)
 ; RV32IFD-NEXT: lw a3, 8(sp)
-; RV32IFD-NEXT: seqz a4, a0
-; RV32IFD-NEXT: snez a5, a1
-; RV32IFD-NEXT: addi a5, a5, -1
-; RV32IFD-NEXT: and a4, a5, a4
+; RV32IFD-NEXT: or a4, a1, a0
+; RV32IFD-NEXT: seqz a4, a4
 ; RV32IFD-NEXT: xori a0, a0, 1
 ; RV32IFD-NEXT: or a0, a0, a1
 ; RV32IFD-NEXT: seqz a0, a0
@@ -1555,10 +1549,8 @@
 ; RV32-NEXT: lw a1, 20(sp)
 ; RV32-NEXT: lw a2, 12(sp)
 ; RV32-NEXT: lw a3, 8(sp)
-; RV32-NEXT: seqz a4, a0
-; RV32-NEXT: snez a5, a1
-; RV32-NEXT: addi a5, a5, -1
-; RV32-NEXT: and a4, a5, a4
+; RV32-NEXT: or a4, a1, a0
+; RV32-NEXT: seqz a4, a4
 ; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: or a0, a0, a1
 ; RV32-NEXT: seqz a0, a0
@@ -1816,10 +1808,8 @@
 ; RV32-NEXT: lw a1, 20(sp)
 ; RV32-NEXT: lw a2, 12(sp)
 ; RV32-NEXT: lw a3, 8(sp)
-; RV32-NEXT: seqz a4, a0
-; RV32-NEXT: snez a5, a1
-; RV32-NEXT: addi a5, a5, -1
-; RV32-NEXT: and a4, a5, a4
+; RV32-NEXT: or a4, a1, a0
+; RV32-NEXT: seqz a4, a4
 ; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: or a0, a0, a1
 ; RV32-NEXT: seqz a0, a0
@@ -3234,11 +3224,9 @@
 ; RV32IF-NEXT: lw a1, 20(sp)
 ; RV32IF-NEXT: lw a2, 12(sp)
 ; RV32IF-NEXT: lw a3, 8(sp)
-; RV32IF-NEXT: seqz a4, a0
-; RV32IF-NEXT: snez a5, a1
-; RV32IF-NEXT: addi a5, a5, -1
-; RV32IF-NEXT: and a4, a5, a4
-; RV32IF-NEXT: neg a4, a4
+; RV32IF-NEXT: or a4, a1, a0
+; RV32IF-NEXT: snez a4, a4
+; RV32IF-NEXT: addi a4, a4, -1
 ; RV32IF-NEXT: and a3, a4, a3
 ; RV32IF-NEXT: xori a0, a0, 1
 ; RV32IF-NEXT: or a0, a0, a1
@@ -3281,11 +3269,9 @@
 ; RV32IFD-NEXT: lw a1, 20(sp)
 ; RV32IFD-NEXT: lw a2, 12(sp)
 ; RV32IFD-NEXT: lw a3, 8(sp)
-; RV32IFD-NEXT: seqz a4, a0
-; RV32IFD-NEXT: snez a5, a1
-; RV32IFD-NEXT: addi a5, a5, -1
-; RV32IFD-NEXT: and a4, a5, a4
-; RV32IFD-NEXT: neg a4, a4
+; RV32IFD-NEXT: or a4, a1, a0
+; RV32IFD-NEXT: snez a4, a4
+; RV32IFD-NEXT: addi a4, a4, -1
 ; RV32IFD-NEXT: and a3, a4, a3
 ; RV32IFD-NEXT: xori a0, a0, 1
 ; RV32IFD-NEXT: or a0, a0, a1
@@ -3601,11 +3587,9 @@
 ; RV32-NEXT: lw a1, 20(sp)
 ; RV32-NEXT: lw a2, 12(sp)
 ; RV32-NEXT: lw a3, 8(sp)
-; RV32-NEXT: seqz a4, a0
-; RV32-NEXT: snez a5, a1
-; RV32-NEXT: addi a5, a5, -1
-; RV32-NEXT: and a4, a5, a4
-; RV32-NEXT: neg a4, a4
+; RV32-NEXT: or a4, a1, a0
+; RV32-NEXT: snez a4, a4
+; RV32-NEXT: addi a4, a4, -1
 ; RV32-NEXT: and a3, a4, a3
 ; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: or a0, a0, a1
@@ -3914,11 +3898,9 @@
 ; RV32-NEXT: lw a1, 20(sp)
 ; RV32-NEXT: lw a2, 12(sp)
 ; RV32-NEXT: lw a3, 8(sp)
-; RV32-NEXT: seqz a4, a0
-; RV32-NEXT: snez a5, a1
-; RV32-NEXT: addi a5, a5, -1
-; RV32-NEXT: and a4, a5, a4
-; RV32-NEXT: neg a4, a4
+; RV32-NEXT: or a4, a1, a0
+; RV32-NEXT: snez a4, a4
+; RV32-NEXT: addi a4, a4, -1
 ; RV32-NEXT: and a3, a4, a3
 ; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: or a0, a0, a1
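
Commentary (not part of the patch): the new SimplifyDemandedBits fold relies on two's-complement negation leaving bit 0 unchanged, i.e. (0 - x) & 1 == x & 1, so an ISD::SUB whose left operand is the constant zero can be replaced by its right operand when only the low bit is demanded; the RISC-V test diffs above show the resulting codegen simplifications. A minimal standalone C++ sketch of that identity, illustrative only and using no LLVM APIs:

  #include <cassert>
  #include <cstdint>

  // Two's-complement negation (0 - x) flips bits above the lowest set bit of x
  // but never changes bit 0, so when only the low bit is demanded, neg x == x.
  // Exhaustively checked here over all 16-bit values.
  int main() {
    for (uint32_t x = 0; x <= 0xFFFFu; ++x) {
      uint32_t neg = 0u - x;           // the "sub 0, x" pattern the fold matches
      assert((neg & 1u) == (x & 1u));  // low bit is unchanged by negation
    }
    return 0;
  }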