diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -87,7 +87,8 @@
   let ParserMatchClass = UImmAsmOperand<2>;
 }
 
-def uimm2_plus1 : Operand<GRLenVT> {
+def uimm2_plus1 : Operand<GRLenVT>,
+                  ImmLeaf<GRLenVT, [{return isUInt<2>(Imm - 1);}]> {
   let ParserMatchClass = UImmAsmOperand<2, "plus1">;
   let EncoderMethod = "getImmOpValueSub1";
   let DecoderMethod = "decodeUImmOperand<2, 1>";
@@ -733,6 +734,19 @@
           (ADDI_D (i64 BaseAddr:$rj), simm12:$imm12)>;
 } // Predicates = [IsLA64]
 
+/// Shifted addition
+let Predicates = [IsLA32] in {
+def : Pat<(add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
+          (ALSL_W GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
+} // Predicates = [IsLA32]
+let Predicates = [IsLA64] in {
+def : Pat<(add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
+          (ALSL_D GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
+def : Pat<(loongarch_bstrpick (add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
+                              (i64 31), (i64 0)),
+          (ALSL_WU GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
+} // Predicates = [IsLA64]
+
 /// Shift
 
 let Predicates = [IsLA32] in {
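For context on the `uimm2_plus1` change: `alsl` stores the shift amount minus one in a 2-bit field (hence the `getImmOpValueSub1` encoder), so the new `ImmLeaf` predicate `isUInt<2>(Imm - 1)` matches exactly the shift amounts 1 through 4. Below is a minimal standalone sketch of that check; `isUIntN` and `matchesUImm2Plus1` are illustrative names, with LLVM's `isUInt<N>` (from `llvm/Support/MathExtras.h`) re-implemented so the snippet builds without LLVM headers:

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for llvm::isUInt<N>: true iff X fits in N unsigned bits.
template <unsigned N> constexpr bool isUIntN(uint64_t X) {
  return N >= 64 || X < (UINT64_C(1) << N);
}

// Mirrors the uimm2_plus1 ImmLeaf predicate: isUInt<2>(Imm - 1).
constexpr bool matchesUImm2Plus1(int64_t Imm) {
  return isUIntN<2>(static_cast<uint64_t>(Imm) - 1);
}

int main() {
  assert(!matchesUImm2Plus1(0)); // shift by 0 is just an add
  for (int64_t Sa = 1; Sa <= 4; ++Sa)
    assert(matchesUImm2Plus1(Sa)); // encodable as Sa - 1 in 2 bits
  assert(!matchesUImm2Plus1(5)); // out of range
  return 0;
}
```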
diff --git a/llvm/test/CodeGen/LoongArch/alsl.ll b/llvm/test/CodeGen/LoongArch/alsl.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/alsl.ll
@@ -0,0 +1,286 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+define i8 @alsl_i8(i8 signext %a, i8 signext %b) nounwind {
+; LA32-LABEL: alsl_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 1
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: alsl_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 1
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i8 %a, 2
+  %add = add nsw i8 %b, %mul
+  ret i8 %add
+}
+
+define i16 @alsl_i16(i16 signext %a, i16 signext %b) nounwind {
+; LA32-LABEL: alsl_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: alsl_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 2
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i16 %a, 4
+  %add = add nsw i16 %b, %mul
+  ret i16 %add
+}
+
+define i32 @alsl_i32(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: alsl_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: alsl_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 3
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i32 %a, 8
+  %add = add nsw i32 %b, %mul
+  ret i32 %add
+}
+
+define i64 @alsl_i64(i64 signext %a, i64 signext %b) nounwind {
+; LA32-LABEL: alsl_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    slli.w $a1, $a1, 4
+; LA32-NEXT:    srli.w $a4, $a0, 28
+; LA32-NEXT:    or $a1, $a1, $a4
+; LA32-NEXT:    add.w $a1, $a3, $a1
+; LA32-NEXT:    alsl.w $a0, $a0, $a2, 4
+; LA32-NEXT:    sltu $a2, $a0, $a2
+; LA32-NEXT:    add.w $a1, $a1, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: alsl_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 4
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i64 %a, 16
+  %add = add nsw i64 %b, %mul
+  ret i64 %add
+}
+
+define i32 @alsl_zext_i8(i8 signext %a, i8 signext %b) nounwind {
+; LA32-LABEL: alsl_zext_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 1
+; LA32-NEXT:    andi $a0, $a0, 255
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: alsl_zext_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 1
+; LA64-NEXT:    andi $a0, $a0, 255
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i8 %a, 2
+  %add = add nsw i8 %b, %mul
+  %zext = zext i8 %add to i32
+  ret i32 %zext
+}
+
+define i32 @alsl_zext_i16(i16 signext %a, i16 signext %b) nounwind {
+; LA32-LABEL: alsl_zext_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 2
+; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: alsl_zext_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    alsl.d $a0, $a0, $a1, 2
+; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i16 %a, 4
+  %add = add nsw i16 %b, %mul
+  %zext = zext i16 %add to i32
+  ret i32 %zext
+}
+
+define i64 @alsl_zext_i32(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: alsl_zext_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    alsl.w $a0, $a0, $a1, 3
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: alsl_zext_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    alsl.wu $a0, $a0, $a1, 3
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i32 %a, 8
+  %add = add nsw i32 %b, %mul
+  %zext = zext i32 %add to i64
+  ret i64 %zext
+}
+
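Every positive test above reduces to the DAG shape `add(b, shl(a, k))` with `k` in [1, 4]; the new patterns select that to a single `alsl.w`/`alsl.d`, and to `alsl.wu` when the 64-bit result is truncated back to 32 bits via `loongarch_bstrpick`. A hedged sketch of the selected instruction's semantics, following the operand order in the `Pat` defs (`rd = rk + (rj << sa)`); the helper name is illustrative:

```cpp
#include <cassert>
#include <cstdint>

// alsl.w/alsl.d semantics as used by the new patterns:
// rd = rk + (rj << sa), with sa in [1, 4].
static int64_t alsl(int64_t rj, int64_t rk, unsigned sa) {
  assert(sa >= 1 && sa <= 4);
  return rk + (rj << sa);
}

int main() {
  const int64_t a = 123, b = 456;
  // b + a*2, b + a*4, b + a*8 and b + a*16 each become one alsl.
  for (unsigned k = 1; k <= 4; ++k)
    assert(b + a * (INT64_C(1) << k) == alsl(a, b, k));
  return 0;
}
```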
+;; Check that alsl.w or alsl.d is not emitted.
+define i8 @mul_add_i8(i8 signext %a, i8 signext %b) nounwind {
+; LA32-LABEL: mul_add_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a2, $zero, 3
+; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    add.w $a0, $a1, $a0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: mul_add_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a2, $zero, 3
+; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    add.d $a0, $a1, $a0
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i8 %a, 3
+  %add = add nsw i8 %b, %mul
+  ret i8 %add
+}
+
+define i16 @mul_add_i16(i16 signext %a, i16 signext %b) nounwind {
+; LA32-LABEL: mul_add_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a2, $zero, 10
+; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    add.w $a0, $a1, $a0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: mul_add_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a2, $zero, 10
+; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    add.d $a0, $a1, $a0
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i16 %a, 10
+  %add = add nsw i16 %b, %mul
+  ret i16 %add
+}
+
+define i32 @mul_add_i32(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: mul_add_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a2, $zero, 12
+; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    add.w $a0, $a1, $a0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: mul_add_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a2, $zero, 12
+; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    add.d $a0, $a1, $a0
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i32 %a, 12
+  %add = add nsw i32 %b, %mul
+  ret i32 %add
+}
+
+define i64 @mul_add_i64(i64 signext %a, i64 signext %b) nounwind {
+; LA32-LABEL: mul_add_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a4, $zero, 15
+; LA32-NEXT:    mul.w $a1, $a1, $a4
+; LA32-NEXT:    mulh.wu $a5, $a0, $a4
+; LA32-NEXT:    add.w $a1, $a5, $a1
+; LA32-NEXT:    add.w $a1, $a3, $a1
+; LA32-NEXT:    mul.w $a0, $a0, $a4
+; LA32-NEXT:    add.w $a0, $a2, $a0
+; LA32-NEXT:    sltu $a2, $a0, $a2
+; LA32-NEXT:    add.w $a1, $a1, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: mul_add_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a2, $zero, 15
+; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    add.d $a0, $a1, $a0
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i64 %a, 15
+  %add = add nsw i64 %b, %mul
+  ret i64 %add
+}
+
+define i32 @mul_add_zext_i8(i8 signext %a, i8 signext %b) nounwind {
+; LA32-LABEL: mul_add_zext_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a2, $zero, 5
+; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    add.w $a0, $a1, $a0
+; LA32-NEXT:    andi $a0, $a0, 255
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: mul_add_zext_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a2, $zero, 5
+; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    add.d $a0, $a1, $a0
+; LA64-NEXT:    andi $a0, $a0, 255
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i8 %a, 5
+  %add = add nsw i8 %b, %mul
+  %zext = zext i8 %add to i32
+  ret i32 %zext
+}
+
+define i32 @mul_add_zext_i16(i16 signext %a, i16 signext %b) nounwind {
+; LA32-LABEL: mul_add_zext_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a2, $zero, 15
+; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    add.w $a0, $a1, $a0
+; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: mul_add_zext_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a2, $zero, 15
+; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    add.d $a0, $a1, $a0
+; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i16 %a, 15
+  %add = add nsw i16 %b, %mul
+  %zext = zext i16 %add to i32
+  ret i32 %zext
+}
+
+;; Check that alsl.wu is not emitted.
+define i64 @mul_add_zext_i32(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: mul_add_zext_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a2, $zero, 5
+; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    add.w $a0, $a1, $a0
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: mul_add_zext_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a2, $zero, 5
+; LA64-NEXT:    mul.d $a0, $a0, $a2
+; LA64-NEXT:    add.d $a0, $a1, $a0
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    ret
+entry:
+  %mul = mul nsw i32 %a, 5
+  %add = add nsw i32 %b, %mul
+  %zext = zext i32 %add to i64
+  ret i64 %zext
+}
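The `mul_add_*` cases above keep the `mul` + `add` sequence because the pattern only fires after the multiply has been canonicalized to a single `shl`, i.e. when the multiplier is a power of two between 2 and 16; constants such as 3, 5, 10, 12 and 15 leave a real multiply in the DAG, so no `alsl` (or `alsl.wu`) can be formed. A small sketch of that classification (`foldsToAlsl` is an illustrative name, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

// b + a*C folds to a single alsl only when C = 1 << sa for sa in [1, 4],
// i.e. when DAG canonicalization turns the mul into one shl.
static bool foldsToAlsl(uint64_t C) {
  bool IsPow2 = C != 0 && (C & (C - 1)) == 0;
  return IsPow2 && C >= 2 && C <= 16;
}

int main() {
  for (uint64_t C : {2, 4, 8, 16}) // multipliers in the positive tests
    assert(foldsToAlsl(C));
  for (uint64_t C : {3, 5, 10, 12, 15}) // multipliers in the mul_add_* tests
    assert(!foldsToAlsl(C));
  return 0;
}
```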
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-fp.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-fp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-fp.ll
@@ -36,8 +36,7 @@
 define double @fldx_d(ptr %a, i64 %idx) nounwind {
 ; LA32F-LABEL: fldx_d:
 ; LA32F:       # %bb.0:
-; LA32F-NEXT:    slli.w $a1, $a1, 3
-; LA32F-NEXT:    add.w $a1, $a0, $a1
+; LA32F-NEXT:    alsl.w $a1, $a1, $a0, 3
 ; LA32F-NEXT:    ld.w $a0, $a1, 0
 ; LA32F-NEXT:    ld.w $a1, $a1, 4
 ; LA32F-NEXT:    ret
@@ -96,8 +95,7 @@
 define void @fstx_d(ptr %dst, i64 %idx, double %val) nounwind {
 ; LA32F-LABEL: fstx_d:
 ; LA32F:       # %bb.0:
-; LA32F-NEXT:    slli.w $a1, $a1, 3
-; LA32F-NEXT:    add.w $a0, $a0, $a1
+; LA32F-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32F-NEXT:    st.w $a4, $a0, 4
 ; LA32F-NEXT:    st.w $a3, $a0, 0
 ; LA32F-NEXT:    ret
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store.ll
@@ -252,8 +252,7 @@
 define i64 @ldx_h(ptr %a, i64 %idx) nounwind {
 ; LA32-LABEL: ldx_h:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    slli.w $a1, $a1, 1
-; LA32-NEXT:    add.w $a1, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a1, $a0, 1
 ; LA32-NEXT:    ld.h $a2, $a1, 0
 ; LA32-NEXT:    ld.h $a0, $a0, 0
 ; LA32-NEXT:    srai.w $a1, $a2, 31
@@ -277,8 +276,7 @@
 define i64 @ldx_w(ptr %a, i64 %idx) nounwind {
 ; LA32-LABEL: ldx_w:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    slli.w $a1, $a1, 2
-; LA32-NEXT:    add.w $a1, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a1, $a0, 2
 ; LA32-NEXT:    ld.w $a2, $a1, 0
 ; LA32-NEXT:    ld.w $a0, $a0, 0
 ; LA32-NEXT:    srai.w $a1, $a2, 31
@@ -302,8 +300,7 @@
 define i64 @ldx_d(ptr %a, i64 %idx) nounwind {
 ; LA32-LABEL: ldx_d:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    slli.w $a1, $a1, 3
-; LA32-NEXT:    add.w $a1, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a1, $a0, 3
 ; LA32-NEXT:    ld.w $a2, $a1, 0
 ; LA32-NEXT:    ld.w $a3, $a0, 0
 ; LA32-NEXT:    ld.w $a1, $a1, 4
@@ -352,8 +349,7 @@
 define i64 @ldx_hu(ptr %a, i64 %idx) nounwind {
 ; LA32-LABEL: ldx_hu:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    slli.w $a1, $a1, 1
-; LA32-NEXT:    add.w $a1, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a1, $a0, 1
 ; LA32-NEXT:    ld.hu $a1, $a1, 0
 ; LA32-NEXT:    ld.hu $a0, $a0, 0
 ; LA32-NEXT:    add.w $a0, $a1, $a0
@@ -379,8 +375,7 @@
 define i64 @ldx_wu(ptr %a, i64 %idx) nounwind {
 ; LA32-LABEL: ldx_wu:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    slli.w $a1, $a1, 2
-; LA32-NEXT:    add.w $a1, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a1, $a0, 2
 ; LA32-NEXT:    ld.w $a1, $a1, 0
 ; LA32-NEXT:    ld.w $a0, $a0, 0
 ; LA32-NEXT:    add.w $a0, $a1, $a0
@@ -480,8 +475,7 @@
 define void @stx_h(ptr %dst, i64 %idx, i16 %val) nounwind {
 ; LA32-LABEL: stx_h:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    slli.w $a1, $a1, 1
-; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    st.h $a3, $a0, 0
 ; LA32-NEXT:    ret
 ;
@@ -498,8 +492,7 @@
 define void @stx_w(ptr %dst, i64 %idx, i32 %val) nounwind {
 ; LA32-LABEL: stx_w:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    slli.w $a1, $a1, 2
-; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    st.w $a3, $a0, 0
 ; LA32-NEXT:    ret
 ;
@@ -516,8 +509,7 @@
 define void @stx_d(ptr %dst, i64 %idx, i64 %val) nounwind {
 ; LA32-LABEL: stx_d:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    slli.w $a1, $a1, 3
-; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    st.w $a4, $a0, 4
 ; LA32-NEXT:    st.w $a3, $a0, 0
 ; LA32-NEXT:    ret
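The load-store hunks above are mechanical check-line updates: on LA32, the scaled address computation for indexed loads and stores now selects to one `alsl.w` instead of `slli.w` + `add.w`. Source of roughly the following shape produces that addressing pattern (illustrative C++ mirroring the test names, not taken from the patch):

```cpp
#include <cstdint>

// &a[idx] for 8-byte elements is a + (idx << 3); with this patch the LA32
// address computation becomes a single "alsl.w $a1, $a1, $a0, 3".
int64_t ldx_d(const int64_t *a, uint64_t idx) { return a[idx]; }

void stx_d(int64_t *a, uint64_t idx, int64_t val) { a[idx] = val; }
```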