diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -896,6 +896,10 @@
   return N->hasOneUse();
 }]>;
 
+def mul_oneuse : PatFrag<(ops node:$A, node:$B), (mul node:$A, node:$B), [{
+  return N->hasOneUse();
+}]>;
+
 /// Simple arithmetic operations
 
 def : PatGprGpr<add, ADD>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -970,6 +970,25 @@
           (SH2ADD GPR:$rs1, GPR:$rs2)>;
 def : Pat<(add (shl GPR:$rs1, (XLenVT 3)), GPR:$rs2),
           (SH3ADD GPR:$rs1, GPR:$rs2)>;
+
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 6)), GPR:$rs2),
+          (SH1ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 10)), GPR:$rs2),
+          (SH1ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 18)), GPR:$rs2),
+          (SH1ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 12)), GPR:$rs2),
+          (SH2ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 20)), GPR:$rs2),
+          (SH2ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 36)), GPR:$rs2),
+          (SH2ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 24)), GPR:$rs2),
+          (SH3ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
+          (SH3ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
+def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
+          (SH3ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
 } // Predicates = [HasStdExtZba]
 
 let Predicates = [HasStdExtZba, IsRV64] in {
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -91,16 +91,14 @@
 ;
 ; RV32IB-LABEL: addmul6:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 6
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh1add a0, a0, a0
+; RV32IB-NEXT:    sh1add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul6:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 6
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh1add a0, a0, a0
+; RV32IBA-NEXT:    sh1add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 6
   %d = add i32 %c, %b
@@ -117,16 +115,14 @@
 ;
 ; RV32IB-LABEL: addmul10:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 10
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh2add a0, a0, a0
+; RV32IB-NEXT:    sh1add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul10:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 10
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh2add a0, a0, a0
+; RV32IBA-NEXT:    sh1add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 10
   %d = add i32 %c, %b
@@ -143,16 +139,14 @@
 ;
 ; RV32IB-LABEL: addmul12:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 12
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh1add a0, a0, a0
+; RV32IB-NEXT:    sh2add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul12:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 12
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh1add a0, a0, a0
+; RV32IBA-NEXT:    sh2add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 12
   %d = add i32 %c, %b
@@ -169,16 +163,14 @@
 ;
 ; RV32IB-LABEL: addmul18:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 18
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh3add a0, a0, a0
+; RV32IB-NEXT:    sh1add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul18:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 18
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh3add a0, a0, a0
+; RV32IBA-NEXT:    sh1add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 18
   %d = add i32 %c, %b
@@ -195,16 +187,14 @@
 ;
 ; RV32IB-LABEL: addmul20:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 20
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh2add a0, a0, a0
+; RV32IB-NEXT:    sh2add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul20:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 20
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh2add a0, a0, a0
+; RV32IBA-NEXT:    sh2add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 20
   %d = add i32 %c, %b
@@ -221,16 +211,14 @@
 ;
 ; RV32IB-LABEL: addmul24:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 24
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh1add a0, a0, a0
+; RV32IB-NEXT:    sh3add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul24:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 24
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh1add a0, a0, a0
+; RV32IBA-NEXT:    sh3add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 24
   %d = add i32 %c, %b
@@ -247,16 +235,14 @@
 ;
 ; RV32IB-LABEL: addmul36:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 36
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh3add a0, a0, a0
+; RV32IB-NEXT:    sh2add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul36:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 36
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh3add a0, a0, a0
+; RV32IBA-NEXT:    sh2add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 36
   %d = add i32 %c, %b
@@ -273,16 +259,14 @@
 ;
 ; RV32IB-LABEL: addmul40:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 40
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh2add a0, a0, a0
+; RV32IB-NEXT:    sh3add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul40:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 40
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh2add a0, a0, a0
+; RV32IBA-NEXT:    sh3add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 40
   %d = add i32 %c, %b
@@ -299,16 +283,14 @@
 ;
 ; RV32IB-LABEL: addmul72:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    addi a2, zero, 72
-; RV32IB-NEXT:    mul a0, a0, a2
-; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    sh3add a0, a0, a0
+; RV32IB-NEXT:    sh3add a0, a0, a1
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBA-LABEL: addmul72:
 ; RV32IBA:       # %bb.0:
-; RV32IBA-NEXT:    addi a2, zero, 72
-; RV32IBA-NEXT:    mul a0, a0, a2
-; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    sh3add a0, a0, a0
+; RV32IBA-NEXT:    sh3add a0, a0, a1
 ; RV32IBA-NEXT:    ret
   %c = mul i32 %a, 72
   %d = add i32 %c, %b
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -381,16 +381,14 @@
 ;
 ; RV64IB-LABEL: addmul6:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 6
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh1add a0, a0, a0
+; RV64IB-NEXT:    sh1add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul6:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 6
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh1add a0, a0, a0
+; RV64IBA-NEXT:    sh1add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 6
   %d = add i64 %c, %b
@@ -407,16 +405,14 @@
 ;
 ; RV64IB-LABEL: addmul10:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 10
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh2add a0, a0, a0
+; RV64IB-NEXT:    sh1add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul10:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 10
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh2add a0, a0, a0
+; RV64IBA-NEXT:    sh1add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 10
   %d = add i64 %c, %b
@@ -433,16 +429,14 @@
 ;
 ; RV64IB-LABEL: addmul12:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 12
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh1add a0, a0, a0
+; RV64IB-NEXT:    sh2add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul12:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 12
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh1add a0, a0, a0
+; RV64IBA-NEXT:    sh2add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 12
   %d = add i64 %c, %b
@@ -459,16 +453,14 @@
 ;
 ; RV64IB-LABEL: addmul18:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 18
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh3add a0, a0, a0
+; RV64IB-NEXT:    sh1add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul18:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 18
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh3add a0, a0, a0
+; RV64IBA-NEXT:    sh1add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 18
   %d = add i64 %c, %b
@@ -485,16 +477,14 @@
 ;
 ; RV64IB-LABEL: addmul20:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 20
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh2add a0, a0, a0
+; RV64IB-NEXT:    sh2add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul20:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 20
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh2add a0, a0, a0
+; RV64IBA-NEXT:    sh2add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 20
   %d = add i64 %c, %b
@@ -511,16 +501,14 @@
 ;
 ; RV64IB-LABEL: addmul24:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 24
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh1add a0, a0, a0
+; RV64IB-NEXT:    sh3add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul24:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 24
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh1add a0, a0, a0
+; RV64IBA-NEXT:    sh3add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 24
   %d = add i64 %c, %b
@@ -537,16 +525,14 @@
 ;
 ; RV64IB-LABEL: addmul36:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 36
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh3add a0, a0, a0
+; RV64IB-NEXT:    sh2add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul36:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 36
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh3add a0, a0, a0
+; RV64IBA-NEXT:    sh2add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 36
   %d = add i64 %c, %b
@@ -563,16 +549,14 @@
 ;
 ; RV64IB-LABEL: addmul40:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 40
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh2add a0, a0, a0
+; RV64IB-NEXT:    sh3add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul40:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 40
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh2add a0, a0, a0
+; RV64IBA-NEXT:    sh3add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 40
   %d = add i64 %c, %b
@@ -589,16 +573,14 @@
 ;
 ; RV64IB-LABEL: addmul72:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    addi a2, zero, 72
-; RV64IB-NEXT:    mul a0, a0, a2
-; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    sh3add a0, a0, a0
+; RV64IB-NEXT:    sh3add a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBA-LABEL: addmul72:
 ; RV64IBA:       # %bb.0:
-; RV64IBA-NEXT:    addi a2, zero, 72
-; RV64IBA-NEXT:    mul a0, a0, a2
-; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    sh3add a0, a0, a0
+; RV64IBA-NEXT:    sh3add a0, a0, a1
 ; RV64IBA-NEXT:    ret
   %c = mul i64 %a, 72
   %d = add i64 %c, %b
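
Not part of the patch: a small Python sketch (hypothetical helper name shadd)
checking the arithmetic the new patterns assume. Every constant C covered above
factors as (2**i + 1) << o with i, o in {1, 2, 3}, so (mul rs1, C) + rs2 maps to
an inner shNadd of rs1 with itself followed by an outer shNadd into rs2:

  # Sketch only: shNadd rd, rs1, rs2 computes (rs1 << N) + rs2.
  def shadd(n, rs1, rs2):
      return (rs1 << n) + rs2

  for inner in (1, 2, 3):
      for outer in (1, 2, 3):
          c = ((1 << inner) + 1) << outer  # 6, 12, 24, 10, 20, 40, 18, 36, 72
          for a in (0, 1, 7, 123456789):
              for b in (0, 3, 1 << 31):
                  assert a * c + b == shadd(outer, shadd(inner, a, a), b)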