diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -3523,7 +3523,7 @@
   }
 
   // fold Y = sra (X, size(X)-1); sub (xor (X, Y), Y) -> (abs X)
-  if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
+  if (hasOperation(ISD::ABS, VT)) {
     if (N0.getOpcode() == ISD::XOR && N1.getOpcode() == ISD::SRA) {
       SDValue X0 = N0.getOperand(0), X1 = N0.getOperand(1);
       SDValue S0 = N1.getOperand(0);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -250,6 +250,9 @@
   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
 
+  if (!Subtarget.hasStdExtZbb())
+    setOperationAction(ISD::ABS, XLenVT, Custom);
+
   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
       Subtarget.hasStdExtZbkb()) {
     if (Subtarget.is64Bit()) {
@@ -5735,6 +5738,19 @@
   MVT VT = Op.getSimpleValueType();
   SDValue X = Op.getOperand(0);
 
+  if (VT.isScalarInteger()) {
+    assert(!Subtarget.hasStdExtZbb() && "Unexpected custom legalisation");
+    // Default lowering uses
+    // Y = sra (X, size(X)-1); xor (add (X, Y), Y);
+    // We want to use
+    // Y = sra (X, size(X)-1); sub (xor (X, Y), Y)
+    // This allows sign_extend_inreg to be combined with the sub.
+    SDValue Shift = DAG.getNode(
+        ISD::SRA, DL, VT, X, DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));
+    SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, Shift);
+    return DAG.getNode(ISD::SUB, DL, VT, Xor, Shift);
+  }
+
   assert(VT.isFixedLengthVector() && "Unexpected type");
   MVT ContainerVT = getContainerForFixedLengthVector(VT);
 
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -732,8 +732,8 @@
 ; RV32I-LABEL: abs_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a1, a0, 31
-; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: abs_i32:
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -942,8 +942,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    srai a1, a0, 63
-; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abs_i32:
@@ -961,9 +961,8 @@
 ; RV64I-LABEL: abs_i32_sext:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srai a1, a0, 63
-; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abs_i32_sext:
@@ -982,8 +981,8 @@
 ; RV64I-LABEL: abs_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srai a1, a0, 63
-; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abs_i64: