Index: llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
===================================================================
--- llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -859,6 +859,15 @@
     : PatFrag<(ops node:$val, node:$count),
               (operator node:$val, (i64 (shiftMask32 node:$count)))>;
 
+// Matches (mul x, C) where the constant operand C has a single use, so the
+// multiply can be profitably expanded into a pair of shift-add (ALSL)
+// instructions without keeping the constant alive for other users.
+def mul_const_oneuse : PatFrag<(ops node:$A, node:$B),
+                               (mul node:$A, node:$B), [{
+  if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+    return N1C->hasOneUse();
+  return false;
+}]>;
+
 let Predicates = [IsLA32] in {
 def : PatGprGpr<add, ADD_W>;
 def : PatGprImm<add, ADDI_W, simm12>;
@@ -939,6 +948,33 @@
                   (AddiPairImmSmall AddiPair:$im))>;
 } // Predicates = [IsLA64]
 
+// Expand (mul x, C) into two ALSL instructions when the constant has the
+// form C = 1 + (1 + (1 << Idx0)) << Idx1 with Idx0/Idx1 in [1, 4].
+let Predicates = [IsLA32] in {
+foreach Idx0 = 1...4 in {
+  foreach Idx1 = 1...4 in {
+    defvar CImm = !add(1, !shl(!add(1, !shl(1, Idx0)), Idx1));
+    def : Pat<(mul_const_oneuse GPR:$r, (i32 CImm)),
+              (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 Idx0)),
+                      GPR:$r, (i32 Idx1))>;
+  }
+}
+} // Predicates = [IsLA32]
+
+let Predicates = [IsLA64] in {
+foreach Idx0 = 1...4 in {
+  foreach Idx1 = 1...4 in {
+    defvar CImm = !add(1, !shl(!add(1, !shl(1, Idx0)), Idx1));
+    def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 CImm)), i32),
+              (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 Idx0)),
+                      GPR:$r, (i64 Idx1))>;
+    def : Pat<(mul_const_oneuse GPR:$r, (i64 CImm)),
+              (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 Idx0)),
+                      GPR:$r, (i64 Idx1))>;
+  }
+}
+} // Predicates = [IsLA64]
+
 foreach Idx = 1...7 in {
   defvar ShamtA = !mul(8, Idx);
   defvar ShamtB = !mul(8, !sub(8, Idx));
Index: llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
===================================================================
--- llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
+++ llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
@@ -288,15 +288,14 @@
 define signext i32 @mul_i32_11(i32 %a) {
 ; LA32-LABEL: mul_i32_11:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 11
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_11:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 11
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 11
   ret i32 %b
@@ -305,15 +304,14 @@
 define signext i32 @mul_i32_13(i32 %a) {
 ; LA32-LABEL: mul_i32_13:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 13
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_13:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 13
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 13
   ret i32 %b
@@ -322,15 +320,14 @@
 define signext i32 @mul_i32_19(i32 %a) {
 ; LA32-LABEL: mul_i32_19:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 19
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_19:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 19
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 19
   ret i32 %b
@@ -339,15 +336,14 @@
 define signext i32 @mul_i32_21(i32 %a) {
 ; LA32-LABEL: mul_i32_21:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 21
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_21:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 21
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 21
   ret i32 %b
@@ -356,15 +352,14 @@
 define signext i32 @mul_i32_25(i32 %a) {
 ; LA32-LABEL: mul_i32_25:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 25
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_25:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 25
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 25
   ret i32 %b
@@ -373,15 +368,14 @@
 define signext i32 @mul_i32_35(i32 %a) {
 ; LA32-LABEL: mul_i32_35:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 35
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_35:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 35
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 35
   ret i32 %b
@@ -390,15 +384,14 @@
 define signext i32 @mul_i32_37(i32 %a) {
 ; LA32-LABEL: mul_i32_37:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 37
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_37:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 37
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 37
   ret i32 %b
@@ -407,15 +400,14 @@
 define signext i32 @mul_i32_41(i32 %a) {
 ; LA32-LABEL: mul_i32_41:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 41
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_41:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 41
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 41
   ret i32 %b
@@ -424,15 +416,14 @@
 define signext i32 @mul_i32_49(i32 %a) {
 ; LA32-LABEL: mul_i32_49:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 49
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_49:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 49
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 49
   ret i32 %b
@@ -441,15 +432,14 @@
 define signext i32 @mul_i32_69(i32 %a) {
 ; LA32-LABEL: mul_i32_69:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 69
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_69:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 69
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 69
   ret i32 %b
@@ -458,15 +448,14 @@
 define signext i32 @mul_i32_73(i32 %a) {
 ; LA32-LABEL: mul_i32_73:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 73
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_73:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 73
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 73
   ret i32 %b
@@ -475,15 +464,14 @@
 define signext i32 @mul_i32_137(i32 %a) {
 ; LA32-LABEL: mul_i32_137:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 137
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_137:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 137
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 137
   ret i32 %b
@@ -492,15 +480,14 @@
 define signext i32 @mul_i32_145(i32 %a) {
 ; LA32-LABEL: mul_i32_145:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 145
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_145:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 145
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 145
   ret i32 %b
@@ -509,15 +496,14 @@
 define signext i32 @mul_i32_273(i32 %a) {
 ; LA32-LABEL: mul_i32_273:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 273
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_273:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 273
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 273
   ret i32 %b
@@ -535,8 +521,8 @@
 ;
 ; LA64-LABEL: mul_i64_11:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 11
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 11
   ret i64 %b
@@ -554,8 +540,8 @@
 ;
 ; LA64-LABEL: mul_i64_13:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 13
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 13
   ret i64 %b
@@ -573,8 +559,8 @@
 ;
 ; LA64-LABEL: mul_i64_19:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 19
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 19
   ret i64 %b
@@ -592,8 +578,8 @@
 ;
 ; LA64-LABEL: mul_i64_21:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 21
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 21
   ret i64 %b
@@ -611,8 +597,8 @@
 ;
 ; LA64-LABEL: mul_i64_25:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 25
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 25
   ret i64 %b
@@ -630,8 +616,8 @@
 ;
 ; LA64-LABEL: mul_i64_35:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 35
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 35
   ret i64 %b
@@ -649,8 +635,8 @@
 ;
 ; LA64-LABEL: mul_i64_37:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 37
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 37
   ret i64 %b
@@ -668,8 +654,8 @@
 ;
 ; LA64-LABEL: mul_i64_41:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 41
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 41
   ret i64 %b
@@ -687,8 +673,8 @@
 ;
 ; LA64-LABEL: mul_i64_49:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 49
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 49
   ret i64 %b
@@ -706,8 +692,8 @@
 ;
 ; LA64-LABEL: mul_i64_69:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 69
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 69
   ret i64 %b
@@ -725,8 +711,8 @@
 ;
 ; LA64-LABEL: mul_i64_73:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 73
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 73
   ret i64 %b
@@ -744,8 +730,8 @@
 ;
 ; LA64-LABEL: mul_i64_137:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 137
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 137
   ret i64 %b
@@ -763,8 +749,8 @@
 ;
 ; LA64-LABEL: mul_i64_145:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 145
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 145
   ret i64 %b
@@ -782,8 +768,8 @@
 ;
 ; LA64-LABEL: mul_i64_273:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 273
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 273
   ret i64 %b