diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -3131,12 +3131,17 @@
   if (VT.getSizeInBits() > Subtarget.getGRLen())
     return false;
 
-  // Break MUL into (SLLI + ADD/SUB) or ALSL.
   if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
     const APInt &Imm = ConstNode->getAPIntValue();
+    // Break MUL into (SLLI + ADD/SUB) or ALSL.
     if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
         (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
       return true;
+    // Break MUL into (ALSL x, (SLLI x, imm0), imm1).
+    if (ConstNode->hasOneUse() &&
+        ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
+         (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
+      return true;
   }
 
   return false;
diff --git a/llvm/test/CodeGen/LoongArch/alsl.ll b/llvm/test/CodeGen/LoongArch/alsl.ll
--- a/llvm/test/CodeGen/LoongArch/alsl.ll
+++ b/llvm/test/CodeGen/LoongArch/alsl.ll
@@ -149,15 +149,15 @@
 define i16 @mul_add_i16(i16 signext %a, i16 signext %b) nounwind {
 ; LA32-LABEL: mul_add_i16:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    ori $a2, $zero, 10
-; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a0, $a2, 1
 ; LA32-NEXT:    add.w $a0, $a1, $a0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_add_i16:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    ori $a2, $zero, 10
-; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a0, $a2, 1
 ; LA64-NEXT:    add.d $a0, $a1, $a0
 ; LA64-NEXT:    ret
 entry:
@@ -169,15 +169,15 @@
 define i32 @mul_add_i32(i32 signext %a, i32 signext %b) nounwind {
 ; LA32-LABEL: mul_add_i32:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    ori $a2, $zero, 12
-; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a0, $a2, 2
 ; LA32-NEXT:    add.w $a0, $a1, $a0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_add_i32:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    ori $a2, $zero, 12
-; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a0, $a2, 2
 ; LA64-NEXT:    add.d $a0, $a1, $a0
 ; LA64-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
@@ -1023,17 +1023,15 @@
 define signext i32 @mul_i32_4098(i32 %a) {
 ; LA32-LABEL: mul_i32_4098:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    lu12i.w $a1, 1
-; LA32-NEXT:    ori $a1, $a1, 2
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    slli.w $a1, $a0, 12
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_4098:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 1
-; LA64-NEXT:    ori $a1, $a1, 2
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a0, 1
+; LA64-NEXT:    slli.d $a0, $a0, 12
+; LA64-NEXT:    add.w $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 4098
   ret i32 %b
@@ -1042,17 +1040,15 @@
 define signext i32 @mul_i32_4100(i32 %a) {
 ; LA32-LABEL: mul_i32_4100:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    lu12i.w $a1, 1
-; LA32-NEXT:    ori $a1, $a1, 4
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    slli.w $a1, $a0, 12
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_4100:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 1
-; LA64-NEXT:    ori $a1, $a1, 4
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a0, 2
+; LA64-NEXT:    slli.d $a0, $a0, 12
+; LA64-NEXT:    add.w $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 4100
   ret i32 %b
@@ -1061,17 +1057,15 @@
 define signext i32 @mul_i32_4104(i32 %a) {
 ; LA32-LABEL: mul_i32_4104:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    lu12i.w $a1, 1
-; LA32-NEXT:    ori $a1, $a1, 8
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    slli.w $a1, $a0, 12
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_4104:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 1
-; LA64-NEXT:    ori $a1, $a1, 8
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    slli.d $a0, $a0, 12
+; LA64-NEXT:    add.w $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 4104
   ret i32 %b
@@ -1080,17 +1074,15 @@
 define signext i32 @mul_i32_4112(i32 %a) {
 ; LA32-LABEL: mul_i32_4112:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    lu12i.w $a1, 1
-; LA32-NEXT:    ori $a1, $a1, 16
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    slli.w $a1, $a0, 12
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_4112:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 1
-; LA64-NEXT:    ori $a1, $a1, 16
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a0, 4
+; LA64-NEXT:    slli.d $a0, $a0, 12
+; LA64-NEXT:    add.w $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 4112
   ret i32 %b
@@ -1109,9 +1101,8 @@
 ;
 ; LA64-LABEL: mul_i64_4098:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 1
-; LA64-NEXT:    ori $a1, $a1, 2
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    slli.d $a1, $a0, 12
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 4098
   ret i64 %b
@@ -1130,9 +1121,8 @@
 ;
 ; LA64-LABEL: mul_i64_4100:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 1
-; LA64-NEXT:    ori $a1, $a1, 4
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    slli.d $a1, $a0, 12
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 4100
   ret i64 %b
@@ -1151,9 +1141,8 @@
 ;
 ; LA64-LABEL: mul_i64_4104:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 1
-; LA64-NEXT:    ori $a1, $a1, 8
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    slli.d $a1, $a0, 12
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 4104
   ret i64 %b
@@ -1172,9 +1161,8 @@
 ;
 ; LA64-LABEL: mul_i64_4112:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 1
-; LA64-NEXT:    ori $a1, $a1, 16
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    slli.d $a1, $a0, 12
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 4112
   ret i64 %b
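
Note (not part of the patch): alsl.{w,d} rd, rj, rk, sa computes (rj << sa) + rk with sa in [1, 4], so a multiplier of the form 2^a + 2^sa can be lowered as one SLLI plus one ALSL, e.g. x * 4098 = (x << 12) + (x << 1). The sketch below is a standalone illustration of the new (Imm - 2/4/8/16).isPowerOf2() condition, not LLVM code; the helper names (matchMulToAlsl, AlslDecomp, emulate) are made up for this note.

    #include <cassert>
    #include <cstdint>
    #include <optional>

    // Hypothetical helper modelling the decomposition the patch enables.
    struct AlslDecomp {
      unsigned Slli; // shift amount for SLLI: log2(Imm - (1 << Alsl))
      unsigned Alsl; // shift amount for ALSL, must be in [1, 4]
    };

    static bool isPowerOf2(uint64_t V) { return V != 0 && (V & (V - 1)) == 0; }

    static unsigned log2u(uint64_t V) {
      unsigned L = 0;
      while (V >>= 1)
        ++L;
      return L;
    }

    // Mirrors the new check: Imm - 2, Imm - 4, Imm - 8 or Imm - 16 is a
    // power of two, i.e. Imm == 2^a + 2^sa with sa in [1, 4].
    static std::optional<AlslDecomp> matchMulToAlsl(uint64_t Imm) {
      for (unsigned Sa = 1; Sa <= 4; ++Sa) {
        uint64_t Base = 1ULL << Sa;
        if (Imm > Base && isPowerOf2(Imm - Base))
          return AlslDecomp{log2u(Imm - Base), Sa};
      }
      return std::nullopt;
    }

    // Evaluates  SLLI tmp, x, Slli ; ALSL res, x, tmp, Alsl  == x * Imm.
    static uint64_t emulate(uint64_t X, AlslDecomp D) {
      uint64_t Tmp = X << D.Slli;  // slli  tmp, x, Slli
      return (X << D.Alsl) + Tmp;  // alsl  res, x, tmp, Alsl
    }

    int main() {
      for (uint64_t Imm : {4098u, 4100u, 4104u, 4112u}) {
        auto D = matchMulToAlsl(Imm);
        assert(D && emulate(123, *D) == 123 * Imm);
      }
    }

For example, matchMulToAlsl(4100) yields Slli = 12, Alsl = 2, which corresponds to the slli.w $a1, $a0, 12 / alsl.w $a0, $a0, $a1, 2 sequence in the updated mul_i32_4100 checks above.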