Index: llvm/lib/Target/RISCV/RISCVInstrInfoB.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -212,6 +212,90 @@
                                    N->getValueType(0));
 }]>;
 
+def AddShlShl_1A : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
+                           (add (shl node:$A, node:$B),
+                                (shl node:$C, node:$D)), [{
+  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
+  if (!N0.hasOneUse() || !N1.hasOneUse())
+    return false;
+  auto *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+  if (!N0C || !N1C)
+    return false;
+  uint64_t C0 = N0C->getZExtValue(), C1 = N1C->getZExtValue();
+  return C0 == C1 + 1;
+}]>;
+
+def AddShlShl_1B : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
+                           (add (shl node:$A, node:$B),
+                                (shl node:$C, node:$D)), [{
+  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
+  if (!N0.hasOneUse() || !N1.hasOneUse())
+    return false;
+  auto *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+  if (!N0C || !N1C)
+    return false;
+  uint64_t C0 = N0C->getZExtValue(), C1 = N1C->getZExtValue();
+  return C0 == C1 - 1;
+}]>;
+
+def AddShlShl_2A : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
+                           (add (shl node:$A, node:$B),
+                                (shl node:$C, node:$D)), [{
+  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
+  if (!N0.hasOneUse() || !N1.hasOneUse())
+    return false;
+  auto *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+  if (!N0C || !N1C)
+    return false;
+  uint64_t C0 = N0C->getZExtValue(), C1 = N1C->getZExtValue();
+  return C0 == C1 + 2;
+}]>;
+
+def AddShlShl_2B : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
+                           (add (shl node:$A, node:$B),
+                                (shl node:$C, node:$D)), [{
+  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
+  if (!N0.hasOneUse() || !N1.hasOneUse())
+    return false;
+  auto *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+  if (!N0C || !N1C)
+    return false;
+  uint64_t C0 = N0C->getZExtValue(), C1 = N1C->getZExtValue();
+  return C0 == C1 - 2;
+}]>;
+
+def AddShlShl_3A : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
+                           (add (shl node:$A, node:$B),
+                                (shl node:$C, node:$D)), [{
+  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
+  if (!N0.hasOneUse() || !N1.hasOneUse())
+    return false;
+  auto *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+  if (!N0C || !N1C)
+    return false;
+  uint64_t C0 = N0C->getZExtValue(), C1 = N1C->getZExtValue();
+  return C0 == C1 + 3;
+}]>;
+
+def AddShlShl_3B : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
+                           (add (shl node:$A, node:$B),
+                                (shl node:$C, node:$D)), [{
+  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
+  if (!N0.hasOneUse() || !N1.hasOneUse())
+    return false;
+  auto *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+  auto *N1C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+  if (!N0C || !N1C)
+    return false;
+  uint64_t C0 = N0C->getZExtValue(), C1 = N1C->getZExtValue();
+  return C0 == C1 - 3;
+}]>;
+
 //===----------------------------------------------------------------------===//
 // Instruction class templates
 //===----------------------------------------------------------------------===//
@@ -1017,6 +1101,24 @@
 def : Pat<(add (shl GPR:$rs1, (XLenVT 2)), non_imm12:$rs2),
           (SH2ADD GPR:$rs1, GPR:$rs2)>;
 def : Pat<(add (shl GPR:$rs1, (XLenVT 3)), non_imm12:$rs2),
           (SH3ADD GPR:$rs1, GPR:$rs2)>;
+def : Pat<(AddShlShl_1A GPR:$rs1, uimmlog2xlen,
+                        non_imm12:$rs2, uimmlog2xlen:$sh),
+          (SLLI (SH1ADD GPR:$rs1, GPR:$rs2), uimmlog2xlen:$sh)>;
+def : Pat<(AddShlShl_1B non_imm12:$rs1, uimmlog2xlen:$sh,
+                        GPR:$rs2, uimmlog2xlen),
+          (SLLI (SH1ADD GPR:$rs2, GPR:$rs1), uimmlog2xlen:$sh)>;
+def : Pat<(AddShlShl_2A GPR:$rs1, uimmlog2xlen,
+                        non_imm12:$rs2, uimmlog2xlen:$sh),
+          (SLLI (SH2ADD GPR:$rs1, GPR:$rs2), uimmlog2xlen:$sh)>;
+def : Pat<(AddShlShl_2B non_imm12:$rs1, uimmlog2xlen:$sh,
+                        GPR:$rs2, uimmlog2xlen),
+          (SLLI (SH2ADD GPR:$rs2, GPR:$rs1), uimmlog2xlen:$sh)>;
+def : Pat<(AddShlShl_3A GPR:$rs1, uimmlog2xlen,
+                        non_imm12:$rs2, uimmlog2xlen:$sh),
+          (SLLI (SH3ADD GPR:$rs1, GPR:$rs2), uimmlog2xlen:$sh)>;
+def : Pat<(AddShlShl_3B non_imm12:$rs1, uimmlog2xlen:$sh,
+                        GPR:$rs2, uimmlog2xlen),
+          (SLLI (SH3ADD GPR:$rs2, GPR:$rs1), uimmlog2xlen:$sh)>;
 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 6)), GPR:$rs2),
           (SH1ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
Index: llvm/test/CodeGen/RISCV/rv32zba.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zba.ll
+++ llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -796,16 +796,14 @@
 ;
 ; RV32B-LABEL: addshl_5_6:
 ; RV32B:       # %bb.0:
+; RV32B-NEXT:    sh1add a0, a1, a0
 ; RV32B-NEXT:    slli a0, a0, 5
-; RV32B-NEXT:    slli a1, a1, 6
-; RV32B-NEXT:    add a0, a0, a1
 ; RV32B-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: addshl_5_6:
 ; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
-; RV32ZBA-NEXT:    slli a1, a1, 6
-; RV32ZBA-NEXT:    add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 6
@@ -823,16 +821,14 @@
 ;
 ; RV32B-LABEL: addshl_5_7:
 ; RV32B:       # %bb.0:
+; RV32B-NEXT:    sh2add a0, a1, a0
 ; RV32B-NEXT:    slli a0, a0, 5
-; RV32B-NEXT:    slli a1, a1, 7
-; RV32B-NEXT:    add a0, a0, a1
 ; RV32B-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: addshl_5_7:
 ; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
-; RV32ZBA-NEXT:    slli a1, a1, 7
-; RV32ZBA-NEXT:    add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 7
@@ -850,16 +846,14 @@
 ;
 ; RV32B-LABEL: addshl_5_8:
 ; RV32B:       # %bb.0:
+; RV32B-NEXT:    sh3add a0, a1, a0
 ; RV32B-NEXT:    slli a0, a0, 5
-; RV32B-NEXT:    slli a1, a1, 8
-; RV32B-NEXT:    add a0, a0, a1
 ; RV32B-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: addshl_5_8:
 ; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
 ; RV32ZBA-NEXT:    slli a0, a0, 5
-; RV32ZBA-NEXT:    slli a1, a1, 8
-; RV32ZBA-NEXT:    add a0, a0, a1
 ; RV32ZBA-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 8
Index: llvm/test/CodeGen/RISCV/rv64zba.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv64zba.ll
+++ llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -1407,16 +1407,14 @@
 ;
 ; RV64B-LABEL: addshl32_5_6:
 ; RV64B:       # %bb.0:
+; RV64B-NEXT:    sh1add a0, a1, a0
 ; RV64B-NEXT:    slliw a0, a0, 5
-; RV64B-NEXT:    slliw a1, a1, 6
-; RV64B-NEXT:    addw a0, a0, a1
 ; RV64B-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: addshl32_5_6:
 ; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 5
-; RV64ZBA-NEXT:    slliw a1, a1, 6
-; RV64ZBA-NEXT:    addw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 6
@@ -1434,16 +1432,14 @@
 ;
 ; RV64B-LABEL: addshl64_5_6:
 ; RV64B:       # %bb.0:
+; RV64B-NEXT:    sh1add a0, a1, a0
 ; RV64B-NEXT:    slli a0, a0, 5
-; RV64B-NEXT:    slli a1, a1, 6
-; RV64B-NEXT:    add a0, a0, a1
 ; RV64B-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: addshl64_5_6:
 ; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
-; RV64ZBA-NEXT:    slli a1, a1, 6
-; RV64ZBA-NEXT:    add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = shl i64 %a, 5
   %d = shl i64 %b, 6
@@ -1461,16 +1457,14 @@
 ;
 ; RV64B-LABEL: addshl32_5_7:
 ; RV64B:       # %bb.0:
+; RV64B-NEXT:    sh2add a0, a1, a0
 ; RV64B-NEXT:    slliw a0, a0, 5
-; RV64B-NEXT:    slliw a1, a1, 7
-; RV64B-NEXT:    addw a0, a0, a1
 ; RV64B-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: addshl32_5_7:
 ; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 5
-; RV64ZBA-NEXT:    slliw a1, a1, 7
-; RV64ZBA-NEXT:    addw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 7
@@ -1488,16 +1482,14 @@
 ;
 ; RV64B-LABEL: addshl64_5_7:
 ; RV64B:       # %bb.0:
+; RV64B-NEXT:    sh2add a0, a1, a0
 ; RV64B-NEXT:    slli a0, a0, 5
-; RV64B-NEXT:    slli a1, a1, 7
-; RV64B-NEXT:    add a0, a0, a1
 ; RV64B-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: addshl64_5_7:
 ; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
-; RV64ZBA-NEXT:    slli a1, a1, 7
-; RV64ZBA-NEXT:    add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = shl i64 %a, 5
   %d = shl i64 %b, 7
@@ -1515,16 +1507,14 @@
 ;
 ; RV64B-LABEL: addshl32_5_8:
 ; RV64B:       # %bb.0:
+; RV64B-NEXT:    sh3add a0, a1, a0
 ; RV64B-NEXT:    slliw a0, a0, 5
-; RV64B-NEXT:    slliw a1, a1, 8
-; RV64B-NEXT:    addw a0, a0, a1
 ; RV64B-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: addshl32_5_8:
 ; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    slliw a0, a0, 5
-; RV64ZBA-NEXT:    slliw a1, a1, 8
-; RV64ZBA-NEXT:    addw a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = shl i32 %a, 5
   %d = shl i32 %b, 8
@@ -1542,16 +1532,14 @@
 ;
 ; RV64B-LABEL: addshl64_5_8:
 ; RV64B:       # %bb.0:
+; RV64B-NEXT:    sh3add a0, a1, a0
 ; RV64B-NEXT:    slli a0, a0, 5
-; RV64B-NEXT:    slli a1, a1, 8
-; RV64B-NEXT:    add a0, a0, a1
 ; RV64B-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: addshl64_5_8:
 ; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    slli a0, a0, 5
-; RV64ZBA-NEXT:    slli a1, a1, 8
-; RV64ZBA-NEXT:    add a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = shl i64 %a, 5
   %d = shl i64 %b, 8
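
Note: all six PatFrags encode the same algebraic identity. For shift amounts c and c+k with
k in {1, 2, 3}, (x << (c + k)) + (y << c) == ((x << k) + y) << c in wrap-around arithmetic,
so two shifts plus an add collapse into one SHkADD and one SLLI. The _A and _B variants only
differ in which add operand carries the larger shift, since `add` is commutative but the
PatFrag match is positional. A minimal standalone C sketch of the equivalence (the function
names are illustrative, not part of the patch; c + k must stay below the bit width to avoid
undefined behavior in C, matching the uimmlog2xlen constraint in the patterns):

#include <assert.h>
#include <stdint.h>

/* Three instructions: two shifts and an add. */
static uint64_t addshl_naive(uint64_t x, uint64_t y, unsigned c, unsigned k) {
  return (x << (c + k)) + (y << c);
}

/* Two instructions: shNadd computes (x << k) + y, then slli shifts by c. */
static uint64_t addshl_zba(uint64_t x, uint64_t y, unsigned c, unsigned k) {
  return ((x << k) + y) << c;
}

int main(void) {
  /* Mirrors the addshl*_5_6/7/8 tests: c = 5, k = 1, 2, 3. */
  for (unsigned k = 1; k <= 3; ++k)
    assert(addshl_naive(0x1234, 0x5678, 5, k) ==
           addshl_zba(0x1234, 0x5678, 5, k));
  return 0;
}

The hasOneUse() checks in the PatFrags keep the fold from firing when either shift result is
needed elsewhere, since rewriting would then duplicate work instead of saving an instruction.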