Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6403,7 +6403,9 @@
 
 // Transform (add (mul x, c0), c1) ->
 //           (add (mul (add x, c1/c0), c0), c1%c0).
-// if c1/c0 and c1%c0 are simm12, while c1 is not.
+// if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case that
+// should also be excluded is when c0*(c1/c0) is simm12, which will lead to
+// an infinite loop in DAGCombine.
 // Or transform (add (mul x, c0), c1) ->
 //              (mul (add x, c1/c0), c0).
 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
@@ -6425,10 +6427,7 @@
   int64_t C0 = N0C->getSExtValue();
   int64_t C1 = N1C->getSExtValue();
   if (C0 == -1 || C0 == 0 || C0 == 1 || (C1 / C0) == 0 || isInt<12>(C1) ||
-      !isInt<12>(C1 % C0) || !isInt<12>(C1 / C0))
-    return SDValue();
-  // If C0 * (C1 / C0) is a 12-bit integer, this transform will be reversed.
-  if (isInt<12>(C0 * (C1 / C0)))
+      !isInt<12>(C1 % C0) || !isInt<12>(C1 / C0) || isInt<12>(C0 * (C1 / C0)))
     return SDValue();
   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
   SDLoc DL(N);
@@ -6441,18 +6440,50 @@
   return DAG.getNode(ISD::ADD, DL, VT, New1,
                      DAG.getConstant(C1 % C0, DL, VT));
 }
 
+// Transform (add (shl x, c0), c1) ->
+//           (add (shl (add x, c1>>c0), c0), c1-(c1>>c0<<c0)).
+// if c1>>c0 and c1-(c1>>c0<<c0) are simm12, while c1 is not.
+// Or transform (add (shl x, c0), c1) ->
+//              (shl (add x, c1>>c0), c0),
+// if c1-(c1>>c0<<c0) is zero, and c1>>c0 is simm12 while c1 is not.
+static SDValue transformAddImmShlImm(SDNode *N, SelectionDAG &DAG,
+                                     const RISCVSubtarget &Subtarget) {
+  // Skip for vector types and larger types.
+  EVT VT = N->getValueType(0);
+  if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
+    return SDValue();
+  // The first operand node must be a SHL and has no other use.
+  SDValue N0 = N->getOperand(0);
+  if (!N0->hasOneUse() || N0->getOpcode() != ISD::SHL)
+    return SDValue();
+  // Check if c0 and c1 match the above conditions.
+  auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  if (!N0C || !N1C)
+    return SDValue();
+  int64_t C0 = N0C->getSExtValue();
+  int64_t C1 = N1C->getSExtValue();
+  if (C0 <= 1 || isInt<12>(C1) || (C1 >> C0) == 0 || !isInt<12>(C1 >> C0) ||
+      !isInt<12>(C1 - (C1 >> C0 << C0)))
+    return SDValue();
+  // Build new nodes.
+  SDLoc DL(N);
+  SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
+                             DAG.getConstant(C1 >> C0, DL, VT));
+  SDValue New1 =
+      DAG.getNode(ISD::SHL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
+  return DAG.getNode(ISD::ADD, DL, VT, New1,
+                     DAG.getConstant(C1 - (C1 >> C0 << C0), DL, VT));
+}
+
 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
-  // Transform (add (mul x, c0), c1) ->
-  //           (add (mul (add x, c1/c0), c0), c1%c0).
-  // if c1/c0 and c1%c0 are simm12, while c1 is not.
-  // Or transform (add (mul x, c0), c1) ->
-  //              (mul (add x, c1/c0), c0).
-  // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
+  if (SDValue V = transformAddImmShlImm(N, DAG, Subtarget))
+    return V;
   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
     return V;
-  // Fold (add (shl x, c0), (shl y, c1)) ->
-  //      (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
     return V;
   // fold (add (select lhs, rhs, cc, 0, y), x) ->
Index: llvm/test/CodeGen/RISCV/shlimm-addimm.ll
===================================================================
--- llvm/test/CodeGen/RISCV/shlimm-addimm.ll
+++ llvm/test/CodeGen/RISCV/shlimm-addimm.ll
@@ -136,18 +136,14 @@
 define i32 @shl5_add47968_a(i32 %x) {
 ; RV32I-LABEL: shl5_add47968_a:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, 1499
 ; RV32I-NEXT:    slli a0, a0, 5
-; RV32I-NEXT:    lui a1, 12
-; RV32I-NEXT:    addi a1, a1, -1184
-; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl5_add47968_a:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, 1499
 ; RV64I-NEXT:    slliw a0, a0, 5
-; RV64I-NEXT:    lui a1, 12
-; RV64I-NEXT:    addiw a1, a1, -1184
-; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i32 %x, 5
   %tmp1 = add i32 %tmp0, 47968
@@ -157,18 +153,14 @@
 define signext i32 @shl5_add47968_b(i32 signext %x) {
 ; RV32I-LABEL: shl5_add47968_b:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, 1499
 ; RV32I-NEXT:    slli a0, a0, 5
-; RV32I-NEXT:    lui a1, 12
-; RV32I-NEXT:    addi a1, a1, -1184
-; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl5_add47968_b:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, 1499
 ; RV64I-NEXT:    slliw a0, a0, 5
-; RV64I-NEXT:    lui a1, 12
-; RV64I-NEXT:    addiw a1, a1, -1184
-; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i32 %x, 5
   %tmp1 = add i32 %tmp0, 47968
@@ -191,10 +183,8 @@
 ;
 ; RV64I-LABEL: shl5_add47968_c:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 1499
 ; RV64I-NEXT:    slli a0, a0, 5
-; RV64I-NEXT:    lui a1, 12
-; RV64I-NEXT:    addiw a1, a1, -1184
-; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i64 %x, 5
   %tmp1 = add i64 %tmp0, 47968
@@ -204,18 +194,16 @@
 define i32 @shl5_add47969_a(i32 %x) {
 ; RV32I-LABEL: shl5_add47969_a:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, 1499
 ; RV32I-NEXT:    slli a0, a0, 5
-; RV32I-NEXT:    lui a1, 12
-; RV32I-NEXT:    addi a1, a1, -1183
-; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl5_add47969_a:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, 1499
 ; RV64I-NEXT:    slliw a0, a0, 5
-; RV64I-NEXT:    lui a1, 12
-; RV64I-NEXT:    addiw a1, a1, -1183
-; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ori a0, a0, 1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i32 %x, 5
   %tmp1 = add i32 %tmp0, 47969
@@ -225,18 +213,16 @@
 define signext i32 @shl5_add47969_b(i32 signext %x) {
 ; RV32I-LABEL: shl5_add47969_b:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, 1499
 ; RV32I-NEXT:    slli a0, a0, 5
-; RV32I-NEXT:    lui a1, 12
-; RV32I-NEXT:    addi a1, a1, -1183
-; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl5_add47969_b:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, 1499
 ; RV64I-NEXT:    slliw a0, a0, 5
-; RV64I-NEXT:    lui a1, 12
-; RV64I-NEXT:    addiw a1, a1, -1183
-; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ori a0, a0, 1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i32 %x, 5
   %tmp1 = add i32 %tmp0, 47969
@@ -259,10 +245,9 @@
 ;
 ; RV64I-LABEL: shl5_add47969_c:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 1499
 ; RV64I-NEXT:    slli a0, a0, 5
-; RV64I-NEXT:    lui a1, 12
-; RV64I-NEXT:    addiw a1, a1, -1183
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ori a0, a0, 1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i64 %x, 5
   %tmp1 = add i64 %tmp0, 47969
@@ -272,18 +257,14 @@
 define i32 @shl5_sub47968_a(i32 %x) {
 ; RV32I-LABEL: shl5_sub47968_a:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, -1499
 ; RV32I-NEXT:    slli a0, a0, 5
-; RV32I-NEXT:    lui a1, 1048564
-; RV32I-NEXT:    addi a1, a1, 1184
-; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl5_sub47968_a:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, -1499
 ; RV64I-NEXT:    slliw a0, a0, 5
-; RV64I-NEXT:    lui a1, 1048564
-; RV64I-NEXT:    addiw a1, a1, 1184
-; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i32 %x, 5
   %tmp1 = add i32 %tmp0, -47968
@@ -293,18 +274,14 @@
 define signext i32 @shl5_sub47968_b(i32 signext %x) {
 ; RV32I-LABEL: shl5_sub47968_b:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, -1499
 ; RV32I-NEXT:    slli a0, a0, 5
-; RV32I-NEXT:    lui a1, 1048564
-; RV32I-NEXT:    addi a1, a1, 1184
-; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl5_sub47968_b:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, -1499
 ; RV64I-NEXT:    slliw a0, a0, 5
-; RV64I-NEXT:    lui a1, 1048564
-; RV64I-NEXT:    addiw a1, a1, 1184
-; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i32 %x, 5
   %tmp1 = add i32 %tmp0, -47968
@@ -328,10 +305,8 @@
 ;
 ; RV64I-LABEL: shl5_sub47968_c:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1499
 ; RV64I-NEXT:    slli a0, a0, 5
-; RV64I-NEXT:    lui a1, 1048564
-; RV64I-NEXT:    addiw a1, a1, 1184
-; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
   %tmp0 = shl i64 %x, 5
   %tmp1 = add i64 %tmp0, -47968
@@ -341,18 +316,16 @@
 define i32 @shl5_sub47969_a(i32 %x) {
 ; RV32I-LABEL: shl5_sub47969_a:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, -1500
 ; RV32I-NEXT:    slli a0, a0, 5
-; RV32I-NEXT:    lui a1, 1048564
-; RV32I-NEXT:    addi a1, a1, 1183
-; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ori a0, a0, 31
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl5_sub47969_a:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, -1500
 ; RV64I-NEXT:    slliw a0, a0, 5
-; RV64I-NEXT:    lui a1, 1048564
-; RV64I-NEXT:    addiw a1, a1, 1183
-; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ori a0, a0, 31
 ; RV64I-NEXT:    ret
   %tmp0 = shl i32 %x, 5
   %tmp1 = add i32 %tmp0, -47969
@@ -362,18 +335,16 @@
 define signext i32 @shl5_sub47969_b(i32 signext %x) {
 ; RV32I-LABEL: shl5_sub47969_b:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, -1500
 ; RV32I-NEXT:    slli a0, a0, 5
-; RV32I-NEXT:    lui a1, 1048564
-; RV32I-NEXT:    addi a1, a1, 1183
-; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ori a0, a0, 31
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl5_sub47969_b:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, -1500
 ; RV64I-NEXT:    slliw a0, a0, 5
-; RV64I-NEXT:    lui a1, 1048564
-; RV64I-NEXT:    addiw a1, a1, 1183
-; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ori a0, a0, 31
 ; RV64I-NEXT:    ret
   %tmp0 = shl i32 %x, 5
   %tmp1 = add i32 %tmp0, -47969
@@ -397,10 +368,9 @@
 ;
 ; RV64I-LABEL: shl5_sub47969_c:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1500
 ; RV64I-NEXT:    slli a0, a0, 5
-; RV64I-NEXT:    lui a1, 1048564
-; RV64I-NEXT:    addiw a1, a1, 1183
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ori a0, a0, 31
 ; RV64I-NEXT:    ret
   %tmp0 = shl i64 %x, 5
   %tmp1 = add i64 %tmp0, -47969
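
Reviewer note (not part of the patch): the constant split that transformAddImmShlImm relies on can be sanity-checked in isolation. The sketch below is standalone C++ with no LLVM dependencies; isSImm12 is a local stand-in for llvm::isInt<12>, and the constants are the ones exercised by shlimm-addimm.ll. It checks that for c0 = 5 both c1>>c0 and c1-(c1>>c0<<c0) are simm12 while c1 itself is not, and that the rewrite is exact.

// Standalone illustration of the constant split behind transformAddImmShlImm.
// Not LLVM code; assumes arithmetic right shift of negative int64_t values,
// the same assumption the DAG combine makes.
#include <cassert>
#include <cstdint>
#include <initializer_list>

static bool isSImm12(int64_t V) { return V >= -2048 && V <= 2047; }

int main() {
  const int64_t C0 = 5; // shift amount used throughout the test file
  for (int64_t C1 : {47968, 47969, -47968, -47969}) {
    int64_t Hi = C1 >> C0;        // folded into the add emitted before the shift
    int64_t Lo = C1 - (Hi << C0); // remainder added (or OR-ed) after the shift
    assert(!isSImm12(C1));                // c1 alone would need lui+addi(w)
    assert(isSImm12(Hi) && isSImm12(Lo)); // both pieces fit in simm12
    assert((Hi << C0) + Lo == C1);        // the rewrite is exact
  }
  return 0;
}

For example, 47968 splits into Hi = 1499, Lo = 0 (the pure shl form checked by shl5_add47968_*), and -47969 splits into Hi = -1500, Lo = 31, matching the addi/slli/ori sequences in the updated CHECK lines.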