Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5838,6 +5838,43 @@
   return SDValue();
 }
 
+// Transform (add (mul x, c0), c1) ->
+//           (add (mul (add x, c1/c0), c0), c1%c0),
+// if c1/c0 and c1%c0 are simm12, while c1 is not.
+// Or transform (add (mul x, c0), c1) ->
+//              (mul (add x, c1/c0), c0),
+// if c1%c0 is zero and c1/c0 is simm12 while c1 is not.
+static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
+                                     unsigned XLen) {
+  // The type must be a scalar integer type that does not exceed XLen bits.
+  EVT VT = N->getValueType(0);
+  if (!VT.isInteger() || VT.isVector() || VT.getSizeInBits() > XLen)
+    return SDValue();
+  // The first operand node must be a MUL and have no other use.
+  SDValue N0 = N->getOperand(0);
+  if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
+    return SDValue();
+  // Check if c0 and c1 match the above conditions.
+  auto *NC0 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+  auto *NC1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  if (!NC0 || !NC1)
+    return SDValue();
+  int64_t C0 = NC0->getSExtValue();
+  int64_t C1 = NC1->getSExtValue();
+  if (C0 == -1 || C0 == 0 || C0 == 1 || (C1 / C0) == 0 || isInt<12>(C1) ||
+      !isInt<12>(C1 % C0) || !isInt<12>(C1 / C0))
+    return SDValue();
+  // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
+  SDLoc DL(N);
+  SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
+                             DAG.getConstant(C1 / C0, DL, VT));
+  SDValue New1 =
+      DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
+  if ((C1 % C0) == 0)
+    return New1;
+  return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(C1 % C0, DL, VT));
+}
+
 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG) {
   // fold (add (select lhs, rhs, cc, 0, y), x) ->
   //   (select lhs, rhs, cc, x, (add x, y))
@@ -6150,8 +6187,12 @@
     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
                        DAG.getConstant(~SignBit, DL, VT));
   }
-  case ISD::ADD:
+  case ISD::ADD: {
+    SDValue NA = transformAddImmMulImm(N, DAG, Subtarget.getXLen());
+    if (NA.getNode() != nullptr)
+      return NA;
     return performADDCombine(N, DAG);
+  }
   case ISD::SUB:
     return performSUBCombine(N, DAG);
   case ISD::AND:
Index: llvm/test/CodeGen/RISCV/addimm-mulimm.ll
===================================================================
--- llvm/test/CodeGen/RISCV/addimm-mulimm.ll
+++ llvm/test/CodeGen/RISCV/addimm-mulimm.ll
@@ -342,20 +342,16 @@
 define i32 @add_mul_combine_reject_e1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_e1:
 ; RV32IMB:       # %bb.0:
+; RV32IMB-NEXT:    addi a0, a0, 1971
 ; RV32IMB-NEXT:    addi a1, zero, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
-; RV32IMB-NEXT:    lui a1, 14
-; RV32IMB-NEXT:    addi a1, a1, -185
-; RV32IMB-NEXT:    add a0, a0, a1
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_e1:
 ; RV64IMB:       # %bb.0:
+; RV64IMB-NEXT:    addiw a0, a0, 1971
 ; RV64IMB-NEXT:    addi a1, zero, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
-; RV64IMB-NEXT:    lui a1, 14
-; RV64IMB-NEXT:    addiw a1, a1, -185
-; RV64IMB-NEXT:    addw a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 29
   %tmp1 = add i32 %tmp0, 57159
@@ -365,20 +361,16 @@
 define signext i32 @add_mul_combine_reject_e2(i32 signext %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_e2:
 ; RV32IMB:       # %bb.0:
+; RV32IMB-NEXT:    addi a0, a0, 1971
 ; RV32IMB-NEXT:    addi a1, zero, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
-; RV32IMB-NEXT:    lui a1, 14
-; RV32IMB-NEXT:    addi a1, a1, -185
-; RV32IMB-NEXT:    add a0, a0, a1
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_e2:
 ; RV64IMB:       # %bb.0:
+; RV64IMB-NEXT:    addiw a0, a0, 1971
 ; RV64IMB-NEXT:    addi a1, zero, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
-; RV64IMB-NEXT:    lui a1, 14
-; RV64IMB-NEXT:    addiw a1, a1, -185
-; RV64IMB-NEXT:    addw a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 29
   %tmp1 = add i32 %tmp0, 57159
@@ -402,11 +394,9 @@
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_e3:
 ; RV64IMB:       # %bb.0:
+; RV64IMB-NEXT:    addi a0, a0, 1971
 ; RV64IMB-NEXT:    addi a1, zero, 29
 ; RV64IMB-NEXT:    mul a0, a0, a1
-; RV64IMB-NEXT:    lui a1, 14
-; RV64IMB-NEXT:    addiw a1, a1, -185
-; RV64IMB-NEXT:    add a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i64 %x, 29
   %tmp1 = add i64 %tmp0, 57159
@@ -416,20 +406,18 @@
 define i32 @add_mul_combine_reject_f1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_f1:
 ; RV32IMB:       # %bb.0:
+; RV32IMB-NEXT:    addi a0, a0, 1972
 ; RV32IMB-NEXT:    addi a1, zero, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
-; RV32IMB-NEXT:    lui a1, 14
-; RV32IMB-NEXT:    addi a1, a1, -145
-; RV32IMB-NEXT:    add a0, a0, a1
+; RV32IMB-NEXT:    addi a0, a0, 11
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_f1:
 ; RV64IMB:       # %bb.0:
+; RV64IMB-NEXT:    addiw a0, a0, 1972
 ; RV64IMB-NEXT:    addi a1, zero, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
-; RV64IMB-NEXT:    lui a1, 14
-; RV64IMB-NEXT:    addiw a1, a1, -145
-; RV64IMB-NEXT:    addw a0, a0, a1
+; RV64IMB-NEXT:    addiw a0, a0, 11
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 29
   %tmp1 = add i32 %tmp0, 57199
@@ -439,20 +427,18 @@
 define signext i32 @add_mul_combine_reject_f2(i32 signext %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_f2:
 ; RV32IMB:       # %bb.0:
+; RV32IMB-NEXT:    addi a0, a0, 1972
 ; RV32IMB-NEXT:    addi a1, zero, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
-; RV32IMB-NEXT:    lui a1, 14
-; RV32IMB-NEXT:    addi a1, a1, -145
-; RV32IMB-NEXT:    add a0, a0, a1
+; RV32IMB-NEXT:    addi a0, a0, 11
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_f2:
 ; RV64IMB:       # %bb.0:
+; RV64IMB-NEXT:    addiw a0, a0, 1972
 ; RV64IMB-NEXT:    addi a1, zero, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
-; RV64IMB-NEXT:    lui a1, 14
-; RV64IMB-NEXT:    addiw a1, a1, -145
-; RV64IMB-NEXT:    addw a0, a0, a1
+; RV64IMB-NEXT:    addiw a0, a0, 11
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 29
   %tmp1 = add i32 %tmp0, 57199
@@ -476,11 +462,10 @@
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_f3:
 ; RV64IMB:       # %bb.0:
+; RV64IMB-NEXT:    addi a0, a0, 1972
 ; RV64IMB-NEXT:    addi a1, zero, 29
 ; RV64IMB-NEXT:    mul a0, a0, a1
-; RV64IMB-NEXT:    lui a1, 14
-; RV64IMB-NEXT:    addiw a1, a1, -145
-; RV64IMB-NEXT:    add a0, a0, a1
+; RV64IMB-NEXT:    addi a0, a0, 11
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i64 %x, 29
   %tmp1 = add i64 %tmp0, 57199
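
The arithmetic behind the rewritten checks above decomposes the test constants for c0 = 29 as 57159 = 29 * 1971 + 0 (so the trailing add disappears) and 57199 = 29 * 1972 + 11, where 1971, 1972, and 11 all fit simm12 while 57159 and 57199 do not. A minimal standalone C++ sketch (illustration only, not part of the patch) that checks the two identities the combine relies on:

    // Illustration only: verify (x * 29) + 57159 == (x + 1971) * 29 and
    // (x * 29) + 57199 == ((x + 1972) * 29) + 11 over a range of x.
    #include <cassert>
    #include <cstdint>

    int main() {
      for (int64_t x = -100000; x <= 100000; ++x) {
        assert(x * 29 + 57159 == (x + 1971) * 29);
        assert(x * 29 + 57199 == (x + 1972) * 29 + 11);
      }
      return 0;
    }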