Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -3778,6 +3778,21 @@
 def MVE_VMLAS_qr_u16 : MVE_VFMAMLA_qr<"vmlas", "u16", 0b1, 0b01, 0b1>;
 def MVE_VMLAS_qr_u32 : MVE_VFMAMLA_qr<"vmlas", "u32", 0b1, 0b10, 0b1>;
 
+let Predicates = [HasMVEInt] in {
+  def : Pat<(v4i32 (add (v4i32 MQPR:$src1),
+                        (v4i32 (mul (v4i32 MQPR:$src2),
+                                    (v4i32 (ARMvdup (i32 tGPR:$x))))))),
+            (v4i32 (MVE_VMLA_qr_u32 $src1, $src2, $x))>;
+  def : Pat<(v8i16 (add (v8i16 MQPR:$src1),
+                        (v8i16 (mul (v8i16 MQPR:$src2),
+                                    (v8i16 (ARMvdup (i32 tGPR:$x))))))),
+            (v8i16 (MVE_VMLA_qr_u16 $src1, $src2, $x))>;
+  def : Pat<(v16i8 (add (v16i8 MQPR:$src1),
+                        (v16i8 (mul (v16i8 MQPR:$src2),
+                                    (v16i8 (ARMvdup (i32 tGPR:$x))))))),
+            (v16i8 (MVE_VMLA_qr_u8 $src1, $src2, $x))>;
+}
+
 let Predicates = [HasMVEFloat] in {
 def MVE_VFMA_qr_f16 : MVE_VFMAMLA_qr<"vfma", "f16", 0b1, 0b11, 0b0>;
 def MVE_VFMA_qr_f32 : MVE_VFMAMLA_qr<"vfma", "f32", 0b0, 0b11, 0b0>;
Index: llvm/test/CodeGen/Thumb2/mve-vmla.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-vmla.ll
@@ -0,0 +1,80 @@
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x i32> @vmlau32(<4 x i32> %A, <4 x i32> %B, i32 %X) nounwind {
+; CHECK-LABEL: vmlau32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmla.u32 q0, q1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <4 x i32> undef, i32 %X, i32 0
+  %1 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer
+  %2 = mul nsw <4 x i32> %B, %1
+  %3 = add nsw <4 x i32> %A, %2
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vmlau32b(<4 x i32> %A, <4 x i32> %B, i32 %X) nounwind {
+; CHECK-LABEL: vmlau32b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmla.u32 q0, q1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <4 x i32> undef, i32 %X, i32 0
+  %1 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer
+  %2 = mul nsw <4 x i32> %1, %B
+  %3 = add nsw <4 x i32> %2, %A
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vmlau16(<8 x i16> %A, <8 x i16> %B, i16 %X) nounwind {
+; CHECK-LABEL: vmlau16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmla.u16 q0, q1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <8 x i16> undef, i16 %X, i32 0
+  %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
+  %2 = mul nsw <8 x i16> %B, %1
+  %3 = add nsw <8 x i16> %A, %2
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vmlau16b(<8 x i16> %A, <8 x i16> %B, i16 %X) nounwind {
+; CHECK-LABEL: vmlau16b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmla.u16 q0, q1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <8 x i16> undef, i16 %X, i32 0
+  %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
+  %2 = mul nsw <8 x i16> %1, %B
+  %3 = add nsw <8 x i16> %2, %A
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <16 x i8> @vmlau8(<16 x i8> %A, <16 x i8> %B, i8 %X) nounwind {
+; CHECK-LABEL: vmlau8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmla.u8 q0, q1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <16 x i8> undef, i8 %X, i32 0
+  %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
+  %2 = mul nsw <16 x i8> %B, %1
+  %3 = add nsw <16 x i8> %A, %2
+  ret <16 x i8> %3
+}
+
+define arm_aapcs_vfpcc <16 x i8> @vmlau8b(<16 x i8> %A, <16 x i8> %B, i8 %X) nounwind {
+; CHECK-LABEL: vmlau8b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmla.u8 q0, q1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <16 x i8> undef, i8 %X, i32 0
+  %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
+  %2 = mul nsw <16 x i8> %1, %B
+  %3 = add nsw <16 x i8> %2, %A
+  ret <16 x i8> %3
+}
+