diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -3193,17 +3193,25 @@
     return N1;
   }
 
-  // Convert 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)).
+  // Convert 0 - abs(x).
   if (N1->getOpcode() == ISD::ABS &&
       !TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
     SDValue X = N1->getOperand(0);
-    SDValue Shift =
-        DAG.getNode(ISD::SRA, DL, VT, X,
-                    DAG.getConstant(BitWidth - 1, DL, getShiftAmountTy(VT)));
-    SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, Shift);
-    AddToWorklist(Shift.getNode());
-    AddToWorklist(Xor.getNode());
-    return DAG.getNode(ISD::SUB, DL, VT, Shift, Xor);
+    if (TLI.isOperationLegal(ISD::SMIN, VT)) {
+      // Convert 0 - abs(x) -> smin (x, -x).
+      SDValue NegX = DAG.getNode(ISD::SUB, DL, VT, N0, X);
+      AddToWorklist(NegX.getNode());
+      return DAG.getNode(ISD::SMIN, DL, VT, NegX, X);
+    } else {
+      // Convert 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)).
+      SDValue Shift = DAG.getNode(
+          ISD::SRA, DL, VT, X,
+          DAG.getConstant(BitWidth - 1, DL, getShiftAmountTy(VT)));
+      SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, Shift);
+      AddToWorklist(Shift.getNode());
+      AddToWorklist(Xor.getNode());
+      return DAG.getNode(ISD::SUB, DL, VT, Shift, Xor);
+    }
   }
 }
 
diff --git a/llvm/test/CodeGen/PowerPC/neg-abs.ll b/llvm/test/CodeGen/PowerPC/neg-abs.ll
--- a/llvm/test/CodeGen/PowerPC/neg-abs.ll
+++ b/llvm/test/CodeGen/PowerPC/neg-abs.ll
@@ -24,13 +24,9 @@
 define <2 x i64> @neg_abs_v2i64(<2 x i64> %0) {
 ; CHECK-LE-LABEL: neg_abs_v2i64:
 ; CHECK-LE:       # %bb.0:
-; CHECK-LE-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
-; CHECK-LE-NEXT:    addi r3, r3, .LCPI1_0@toc@l
-; CHECK-LE-NEXT:    lxvd2x vs0, 0, r3
-; CHECK-LE-NEXT:    xxswapd vs35, vs0
-; CHECK-LE-NEXT:    vsrad v3, v2, v3
-; CHECK-LE-NEXT:    xxlxor vs34, vs34, vs35
-; CHECK-LE-NEXT:    vsubudm v2, v3, v2
+; CHECK-LE-NEXT:    xxlxor vs35, vs35, vs35
+; CHECK-LE-NEXT:    vsubudm v3, v3, v2
+; CHECK-LE-NEXT:    vminsd v2, v3, v2
 ; CHECK-LE-NEXT:    blr
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %0, i1 true)
   %neg.abs = sub <2 x i64> zeroinitializer, %abs
@@ -40,12 +36,9 @@
 define <4 x i32> @neg_abs_v4i32(<4 x i32> %0) {
 ; CHECK-LE-LABEL: neg_abs_v4i32:
 ; CHECK-LE:       # %bb.0:
-; CHECK-LE-NEXT:    vspltisw v3, -16
-; CHECK-LE-NEXT:    vspltisw v4, 15
-; CHECK-LE-NEXT:    vsubuwm v3, v4, v3
-; CHECK-LE-NEXT:    vsraw v3, v2, v3
-; CHECK-LE-NEXT:    xxlxor vs34, vs34, vs35
-; CHECK-LE-NEXT:    vsubuwm v2, v3, v2
+; CHECK-LE-NEXT:    xxlxor vs35, vs35, vs35
+; CHECK-LE-NEXT:    vsubuwm v3, v3, v2
+; CHECK-LE-NEXT:    vminsw v2, v3, v2
 ; CHECK-LE-NEXT:    blr
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %0, i1 true)
   %neg.abs = sub <4 x i32> zeroinitializer, %abs
@@ -55,10 +48,9 @@
 define <8 x i16> @neg_abs_v8i16(<8 x i16> %0) {
 ; CHECK-LE-LABEL: neg_abs_v8i16:
 ; CHECK-LE:       # %bb.0:
-; CHECK-LE-NEXT:    vspltish v3, 15
-; CHECK-LE-NEXT:    vsrah v3, v2, v3
-; CHECK-LE-NEXT:    xxlxor vs34, vs34, vs35
-; CHECK-LE-NEXT:    vsubuhm v2, v3, v2
+; CHECK-LE-NEXT:    xxlxor vs35, vs35, vs35
+; CHECK-LE-NEXT:    vsubuhm v3, v3, v2
+; CHECK-LE-NEXT:    vminsh v2, v3, v2
 ; CHECK-LE-NEXT:    blr
   %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %0, i1 true)
   %neg.abs = sub <8 x i16> zeroinitializer, %abs
@@ -68,10 +60,9 @@
 define <16 x i8> @neg_abs_v16i8(<16 x i8> %0) {
 ; CHECK-LE-LABEL: neg_abs_v16i8:
 ; CHECK-LE:       # %bb.0:
-; CHECK-LE-NEXT:    vspltisb v3, 7
-; CHECK-LE-NEXT:    vsrab v3, v2, v3
-; CHECK-LE-NEXT:    xxlxor vs34, vs34, vs35
-; CHECK-LE-NEXT:    vsububm v2, v3, v2
+; CHECK-LE-NEXT:    xxlxor vs35, vs35, vs35
+; CHECK-LE-NEXT:    vsububm v3, v3, v2
+; CHECK-LE-NEXT:    vminsb v2, v3, v2
 ; CHECK-LE-NEXT:    blr
   %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %0, i1 true)
   %neg.abs = sub <16 x i8> zeroinitializer, %abs