Index: llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
===================================================================
--- llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -834,6 +834,14 @@
     : PatFrag<(ops node:$val, node:$count),
               (operator node:$val, (i64 (shiftMask32 node:$count)))>;
 
+def mul_const_oneuse : PatFrag<(ops node:$A, node:$B),
+                               (mul node:$A, node:$B), [{
+  if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+    if (N1C->hasOneUse())
+      return true;
+  return false;
+}]>;
+
 let Predicates = [IsLA32] in {
 def : PatGprGpr<add, ADD_W>;
 def : PatGprImm<add, ADDI_W, simm12>;
@@ -899,6 +907,139 @@
           (ADDI_W (ADDU16I_D GPR:$rj, (HI16ForAddu16idAddiPair $imm)),
                   (LO12 $imm))>;
 
+let Predicates = [IsLA32] in {
+def : Pat<(mul_const_oneuse GPR:$r, (i32 13)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 1)),
+                  GPR:$r, (i32 2))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 25)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 1)),
+                  GPR:$r, (i32 3))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 49)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 1)),
+                  GPR:$r, (i32 4))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 11)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 2)),
+                  GPR:$r, (i32 1))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 21)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 2)),
+                  GPR:$r, (i32 2))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 41)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 2)),
+                  GPR:$r, (i32 3))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 19)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 3)),
+                  GPR:$r, (i32 1))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 37)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 3)),
+                  GPR:$r, (i32 2))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 73)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 3)),
+                  GPR:$r, (i32 3))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 145)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 3)),
+                  GPR:$r, (i32 4))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 35)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 4)),
+                  GPR:$r, (i32 1))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 69)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 4)),
+                  GPR:$r, (i32 2))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 137)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 4)),
+                  GPR:$r, (i32 3))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i32 273)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 4)),
+                  GPR:$r, (i32 4))>;
+} // Predicates = [IsLA32]
+
+let Predicates = [IsLA64] in {
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 13)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 1)),
+                  GPR:$r, (i64 2))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 25)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 1)),
+                  GPR:$r, (i64 3))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 49)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 1)),
+                  GPR:$r, (i64 4))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 11)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 2)),
+                  GPR:$r, (i64 1))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 21)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 2)),
+                  GPR:$r, (i64 2))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 41)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 2)),
+                  GPR:$r, (i64 3))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 19)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 3)),
+                  GPR:$r, (i64 1))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 37)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 3)),
+                  GPR:$r, (i64 2))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 73)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 3)),
+                  GPR:$r, (i64 3))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 145)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 3)),
+                  GPR:$r, (i64 4))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 35)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 4)),
+                  GPR:$r, (i64 1))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 69)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 4)),
+                  GPR:$r, (i64 2))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 137)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 4)),
+                  GPR:$r, (i64 3))>;
+def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 273)), i32),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 4)),
+                  GPR:$r, (i64 4))>;
+
+def : Pat<(mul_const_oneuse GPR:$r, (i64 13)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 1)),
+                  GPR:$r, (i64 2))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 25)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 1)),
+                  GPR:$r, (i64 3))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 49)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 1)),
+                  GPR:$r, (i64 4))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 11)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 2)),
+                  GPR:$r, (i64 1))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 21)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 2)),
+                  GPR:$r, (i64 2))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 41)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 2)),
+                  GPR:$r, (i64 3))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 19)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 3)),
+                  GPR:$r, (i64 1))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 37)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 3)),
+                  GPR:$r, (i64 2))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 73)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 3)),
+                  GPR:$r, (i64 3))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 145)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 3)),
+                  GPR:$r, (i64 4))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 35)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 4)),
+                  GPR:$r, (i64 1))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 69)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 4)),
+                  GPR:$r, (i64 2))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 137)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 4)),
+                  GPR:$r, (i64 3))>;
+def : Pat<(mul_const_oneuse GPR:$r, (i64 273)),
+          (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 4)),
+                  GPR:$r, (i64 4))>;
+} // Predicates = [IsLA64]
+
 foreach Idx = 1...7 in {
   defvar ShamtA = !mul(8, Idx);
   defvar ShamtB = !mul(8, !sub(8, Idx));
Index: llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
===================================================================
--- llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
+++ llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
@@ -288,15 +288,14 @@
 define signext i32 @mul_i32_11(i32 %a) {
 ; LA32-LABEL: mul_i32_11:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 11
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_11:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 11
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 11
   ret i32 %b
@@ -305,15 +304,14 @@
 define signext i32 @mul_i32_13(i32 %a) {
 ; LA32-LABEL: mul_i32_13:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 13
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_13:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 13
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 13
   ret i32 %b
@@ -322,15 +320,14 @@
 define signext i32 @mul_i32_19(i32 %a) {
 ; LA32-LABEL: mul_i32_19:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 19
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_19:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 19
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 19
   ret i32 %b
@@ -339,15 +336,14 @@
 define signext i32 @mul_i32_21(i32 %a) {
 ; LA32-LABEL: mul_i32_21:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 21
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_21:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 21
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 21
   ret i32 %b
@@ -356,15 +352,14 @@
 define signext i32 @mul_i32_25(i32 %a) {
 ; LA32-LABEL: mul_i32_25:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 25
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_25:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 25
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 25
   ret i32 %b
@@ -373,15 +368,14 @@
 define signext i32 @mul_i32_35(i32 %a) {
 ; LA32-LABEL: mul_i32_35:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 35
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_35:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 35
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 35
   ret i32 %b
@@ -390,15 +384,14 @@
 define signext i32 @mul_i32_37(i32 %a) {
 ; LA32-LABEL: mul_i32_37:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 37
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_37:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 37
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 37
   ret i32 %b
@@ -407,15 +400,14 @@
 define signext i32 @mul_i32_41(i32 %a) {
 ; LA32-LABEL: mul_i32_41:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 41
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_41:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 41
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 41
   ret i32 %b
@@ -424,15 +416,14 @@
 define signext i32 @mul_i32_49(i32 %a) {
 ; LA32-LABEL: mul_i32_49:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 49
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_49:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 49
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 49
   ret i32 %b
@@ -441,15 +432,14 @@
 define signext i32 @mul_i32_69(i32 %a) {
 ; LA32-LABEL: mul_i32_69:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 69
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_69:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 69
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 69
   ret i32 %b
@@ -458,15 +448,14 @@
 define signext i32 @mul_i32_73(i32 %a) {
 ; LA32-LABEL: mul_i32_73:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 73
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_73:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 73
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 73
   ret i32 %b
@@ -475,15 +464,14 @@
 define signext i32 @mul_i32_137(i32 %a) {
 ; LA32-LABEL: mul_i32_137:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 137
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_137:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 137
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 137
   ret i32 %b
@@ -492,15 +480,14 @@
 define signext i32 @mul_i32_145(i32 %a) {
 ; LA32-LABEL: mul_i32_145:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 145
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_145:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 145
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 145
   ret i32 %b
@@ -509,15 +496,14 @@
 define signext i32 @mul_i32_273(i32 %a) {
 ; LA32-LABEL: mul_i32_273:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 273
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_273:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 273
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 273
   ret i32 %b
@@ -535,8 +521,8 @@
 ;
 ; LA64-LABEL: mul_i64_11:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 11
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 11
   ret i64 %b
@@ -554,8 +540,8 @@
 ;
 ; LA64-LABEL: mul_i64_13:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 13
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 13
   ret i64 %b
@@ -573,8 +559,8 @@
 ;
 ; LA64-LABEL: mul_i64_19:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 19
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 19
   ret i64 %b
@@ -592,8 +578,8 @@
 ;
 ; LA64-LABEL: mul_i64_21:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 21
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 21
   ret i64 %b
@@ -611,8 +597,8 @@
 ;
 ; LA64-LABEL: mul_i64_25:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 25
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 25
   ret i64 %b
@@ -630,8 +616,8 @@
 ;
 ; LA64-LABEL: mul_i64_35:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 35
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 35
   ret i64 %b
@@ -649,8 +635,8 @@
 ;
 ; LA64-LABEL: mul_i64_37:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 37
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 37
   ret i64 %b
@@ -668,8 +654,8 @@
 ;
 ; LA64-LABEL: mul_i64_41:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 41
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 41
   ret i64 %b
@@ -687,8 +673,8 @@
 ;
 ; LA64-LABEL: mul_i64_49:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 49
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 49
   ret i64 %b
@@ -706,8 +692,8 @@
 ;
 ; LA64-LABEL: mul_i64_69:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 69
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 69
   ret i64 %b
@@ -725,8 +711,8 @@
 ;
 ; LA64-LABEL: mul_i64_73:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 73
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 73
   ret i64 %b
@@ -744,8 +730,8 @@
 ;
 ; LA64-LABEL: mul_i64_137:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 137
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 137
   ret i64 %b
@@ -763,8 +749,8 @@
 ;
 ; LA64-LABEL: mul_i64_145:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 145
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 145
   ret i64 %b
@@ -782,8 +768,8 @@
 ;
 ; LA64-LABEL: mul_i64_273:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 273
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 273
   ret i64 %b