diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -3209,6 +3209,19 @@
       // 0 - X --> X if X is 0 or the minimum signed value.
       return N1;
     }
+
+    // Convert 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)).
+    if (N1->getOpcode() == ISD::ABS &&
+        !TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
+      SDValue X = N1->getOperand(0);
+      SDValue Shift =
+          DAG.getNode(ISD::SRA, DL, VT, X,
+                      DAG.getConstant(BitWidth - 1, DL, getShiftAmountTy(VT)));
+      SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, Shift);
+      AddToWorklist(Shift.getNode());
+      AddToWorklist(Xor.getNode());
+      return DAG.getNode(ISD::SUB, DL, VT, Shift, Xor);
+    }
   }
 
   // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
diff --git a/llvm/test/CodeGen/AArch64/neg-abs.ll b/llvm/test/CodeGen/AArch64/neg-abs.ll
--- a/llvm/test/CodeGen/AArch64/neg-abs.ll
+++ b/llvm/test/CodeGen/AArch64/neg-abs.ll
@@ -7,9 +7,9 @@
 define i64@neg_abs(i64 %x) {
 ; CHECK-LABEL: neg_abs:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, #0 // =0
-; CHECK-NEXT:    cneg x8, x0, mi
-; CHECK-NEXT:    neg x0, x8
+; CHECK-NEXT:    asr x8, x0, #63
+; CHECK-NEXT:    eor x9, x0, x8
+; CHECK-NEXT:    sub x0, x8, x9
 ; CHECK-NEXT:    ret
   %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
   %neg = sub nsw i64 0, %abs
diff --git a/llvm/test/CodeGen/PowerPC/neg-abs.ll b/llvm/test/CodeGen/PowerPC/neg-abs.ll
--- a/llvm/test/CodeGen/PowerPC/neg-abs.ll
+++ b/llvm/test/CodeGen/PowerPC/neg-abs.ll
@@ -9,9 +9,8 @@
 ; CHECK-LE-LABEL: neg_abs:
 ; CHECK-LE:       # %bb.0:
 ; CHECK-LE-NEXT:    sradi r4, r3, 63
-; CHECK-LE-NEXT:    add r3, r3, r4
 ; CHECK-LE-NEXT:    xor r3, r3, r4
-; CHECK-LE-NEXT:    neg r3, r3
+; CHECK-LE-NEXT:    sub r3, r4, r3
 ; CHECK-LE-NEXT:    blr
   %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
   %neg = sub nsw i64 0, %abs
diff --git a/llvm/test/CodeGen/RISCV/neg-abs.ll b/llvm/test/CodeGen/RISCV/neg-abs.ll
--- a/llvm/test/CodeGen/RISCV/neg-abs.ll
+++ b/llvm/test/CodeGen/RISCV/neg-abs.ll
@@ -9,18 +9,15 @@
 ; RV32-LABEL: neg_abs32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    srai a1, a0, 31
-; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    sub a0, a1, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: neg_abs32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    sext.w a1, a0
-; RV64-NEXT:    srai a1, a1, 63
-; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    sraiw a1, a0, 31
 ; RV64-NEXT:    xor a0, a0, a1
-; RV64-NEXT:    negw a0, a0
+; RV64-NEXT:    subw a0, a1, a0
 ; RV64-NEXT:    ret
   %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
   %neg = sub nsw i32 0, %abs
@@ -30,25 +27,20 @@
 define i64 @neg_abs64(i64 %x) {
 ; RV32-LABEL: neg_abs64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    bgez a1, .LBB1_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    snez a2, a0
-; RV32-NEXT:    add a1, a1, a2
-; RV32-NEXT:    neg a1, a1
-; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:  .LBB1_2:
-; RV32-NEXT:    snez a2, a0
-; RV32-NEXT:    add a1, a1, a2
-; RV32-NEXT:    neg a1, a1
-; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    srai a2, a1, 31
+; RV32-NEXT:    xor a0, a0, a2
+; RV32-NEXT:    sltu a3, a2, a0
+; RV32-NEXT:    xor a1, a1, a2
+; RV32-NEXT:    sub a1, a2, a1
+; RV32-NEXT:    sub a1, a1, a3
+; RV32-NEXT:    sub a0, a2, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: neg_abs64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    srai a1, a0, 63
-; RV64-NEXT:    add a0, a0, a1
 ; RV64-NEXT:    xor a0, a0, a1
-; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    sub a0, a1, a0
 ; RV64-NEXT:    ret
   %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
   %neg = sub nsw i64 0, %abs
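
Note on the transform: with Y = sra(X, w-1) (all sign bits, i.e. 0 for X >= 0 and -1 for X < 0), the two's-complement identity 0 - abs(X) == Y - (X ^ Y) holds, and folding the negation into the final sub saves an instruction over the previous sra/add/xor/neg sequence, as the PowerPC and RISC-V test deltas show. The following standalone C++ sketch (not part of the patch; function names are illustrative, and it assumes arithmetic right shift on signed values, which is what ISD::SRA specifies and what C++20 guarantees) checks the identity against a straightforward reference:

// Sketch only: verifies 0 - abs(x) == Y - (X ^ Y) where Y = X >> (w-1).
#include <cassert>
#include <cstdint>

// The branchless expansion the combine emits, written out in plain C++.
int64_t negAbsExpanded(int64_t x) {
  int64_t y = x >> 63;  // sra(X, w-1): 0 if x >= 0, -1 if x < 0
  int64_t m = x ^ y;    // xor(X, Y): x if x >= 0, ~x (= -x - 1) if x < 0
  return y - m;         // sub(Y, Xor): -x if x >= 0, x if x < 0
}

// Reference: negate the obvious abs. INT64_MIN is excluded below because
// the IR tests call llvm.abs with i1 true (poison on INT64_MIN).
int64_t negAbsReference(int64_t x) { return x < 0 ? x : -x; }

int main() {
  const int64_t samples[] = {0, 1, -1, 42, -42, INT64_MAX, INT64_MIN + 1};
  for (int64_t x : samples)
    assert(negAbsExpanded(x) == negAbsReference(x));
  return 0;
}

For x < 0 the expansion computes y - m = -1 - (-x - 1) = x, and for x >= 0 it computes 0 - x = -x, matching the reference on both sides of the split.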