diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -135,7 +135,7 @@
   defm UMAX_ZI   : sve_int_arith_imm1_unsigned<0b01, "umax", umax>;
   defm UMIN_ZI   : sve_int_arith_imm1_unsigned<0b11, "umin", umin>;
 
-  defm MUL_ZI     : sve_int_arith_imm2<"mul">;
+  defm MUL_ZI     : sve_int_arith_imm2<"mul", mul>;
   defm MUL_ZPmZ   : sve_int_bin_pred_arit_2<0b000, "mul", int_aarch64_sve_mul>;
   defm SMULH_ZPmZ : sve_int_bin_pred_arit_2<0b010, "smulh", int_aarch64_sve_smulh>;
   defm UMULH_ZPmZ : sve_int_bin_pred_arit_2<0b011, "umulh", int_aarch64_sve_umulh>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -3537,11 +3537,16 @@
   def : SVE_1_Op_Imm_Arith_Pat<nxv2i64, op, ZPR64, i64, SVEArithUImmPat, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve_int_arith_imm2<string asm> {
+multiclass sve_int_arith_imm2<string asm, SDPatternOperator op> {
   def _B : sve_int_arith_imm<0b00, 0b110000, asm, ZPR8, simm8>;
   def _H : sve_int_arith_imm<0b01, 0b110000, asm, ZPR16, simm8>;
   def _S : sve_int_arith_imm<0b10, 0b110000, asm, ZPR32, simm8>;
   def _D : sve_int_arith_imm<0b11, 0b110000, asm, ZPR64, simm8>;
+
+  def : SVE_1_Op_Imm_Arith_Pat<nxv16i8, op, ZPR8,  i32, SVEArithSImmPat, !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv8i16, op, ZPR16, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv4i32, op, ZPR32, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv2i64, op, ZPR64, i64, SVEArithSImmPat, !cast<Instruction>(NAME # _D)>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
@@ -363,3 +363,86 @@
   %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
   ret <vscale x 2 x i64> %res
 }
+
+;
+; MUL
+;
+define <vscale x 16 x i8> @mul_i8_neg(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: mul_i8_neg
+; CHECK: mul z0.b, z0.b, #-17
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 -17, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = mul <vscale x 16 x i8> %a, %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @mul_i8_pos(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: mul_i8_pos
+; CHECK: mul z0.b, z0.b, #105
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 105, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %res = mul <vscale x 16 x i8> %a, %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @mul_i16_neg(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_i16_neg
+; CHECK: mul z0.h, z0.h, #-17
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 -17, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = mul <vscale x 8 x i16> %a, %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @mul_i16_pos(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_i16_pos
+; CHECK: mul z0.h, z0.h, #105
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 105, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %res = mul <vscale x 8 x i16> %a, %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @mul_i32_neg(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_i32_neg
+; CHECK: mul z0.s, z0.s, #-17
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 -17, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = mul <vscale x 4 x i32> %a, %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @mul_i32_pos(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_i32_pos
+; CHECK: mul z0.s, z0.s, #105
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 105, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %res = mul <vscale x 4 x i32> %a, %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @mul_i64_neg(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_i64_neg
+; CHECK: mul z0.d, z0.d, #-17
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 -17, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = mul <vscale x 2 x i64> %a, %splat
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @mul_i64_pos(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_i64_pos
+; CHECK: mul z0.d, z0.d, #105
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 105, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = mul <vscale x 2 x i64> %a, %splat
+  ret <vscale x 2 x i64> %res
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm-2.ll b/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm-2.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm-2.ll
@@ -0,0 +1,12 @@
+; RUN: not llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s
+
+; Numbers smaller than -128 or greater than 127 are not allowed.
+; This should get lowered to a regular vector multiply and these tests should
+; be updated when those patterns are added.
+
+define <vscale x 2 x i64> @mul_i64_neg_1(<vscale x 2 x i64> %a) {
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = mul <vscale x 2 x i64> %a, %splat
+  ret <vscale x 2 x i64> %res
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-neg-int-arith-imm.ll
@@ -0,0 +1,11 @@
+; RUN: not llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s
+
+; Numbers smaller than -128 or greater than 127 are not allowed for imm mul.
+; This should get lowered to a regular vector multiply and these tests should
+; be updated when those patterns are added.
+define <vscale x 2 x i64> @mul_i64_neg_1(<vscale x 2 x i64> %a) {
+  %elt = insertelement <vscale x 2 x i64> undef, i64 -130, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %res = mul <vscale x 2 x i64> %a, %splat
+  ret <vscale x 2 x i64> %res
+}
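
Note (not part of the patch): the added pattern defs reuse the SVE_1_Op_Imm_Arith_Pat class and the SVEArithSImmPat complex pattern that the smax/smin/umax/umin immediate forms above already use. For context, a sketch of that class as it appears elsewhere in SVEInstrFormats.td, recalled from memory; parameter names and exact signature may differ slightly from the tree:

// Sketch: matches op(Zn, splat(imm)) where the complex pattern (here
// SVEArithSImmPat, backed by SelectSVESignedArithImm, i.e. a simm8 in
// [-128, 127]) accepts the immediate, and rewrites the node to the
// unpredicated immediate-form instruction, e.g. MUL_ZI.
class SVE_1_Op_Imm_Arith_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
                             ValueType it, ComplexPattern cpx, Instruction inst>
  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
        (inst $Op1, i32:$imm)>;

Splats whose immediate falls outside that range fail to select, which is why the two new negative tests expect llc to fail until generic vector-multiply lowering covers them.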