diff --git a/llvm/test/CodeGen/AArch64/neon-mla-mls.ll b/llvm/test/CodeGen/AArch64/neon-mla-mls.ll
--- a/llvm/test/CodeGen/AArch64/neon-mla-mls.ll
+++ b/llvm/test/CodeGen/AArch64/neon-mla-mls.ll
@@ -1,85 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
 
 define <8 x i8> @mla8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
-;CHECK: mla {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: mla8xi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mla v2.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <8 x i8> %A, %B;
 	%tmp2 = add <8 x i8> %C, %tmp1;
 	ret <8 x i8> %tmp2
 }
 
 define <16 x i8> @mla16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
-;CHECK: mla {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: mla16xi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mla v2.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <16 x i8> %A, %B;
 	%tmp2 = add <16 x i8> %C, %tmp1;
 	ret <16 x i8> %tmp2
 }
 
 define <4 x i16> @mla4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
-;CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: mla4xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mla v2.4h, v0.4h, v1.4h
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <4 x i16> %A, %B;
 	%tmp2 = add <4 x i16> %C, %tmp1;
 	ret <4 x i16> %tmp2
 }
 
 define <8 x i16> @mla8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
-;CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: mla8xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mla v2.8h, v0.8h, v1.8h
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <8 x i16> %A, %B;
 	%tmp2 = add <8 x i16> %C, %tmp1;
 	ret <8 x i16> %tmp2
 }
 
 define <2 x i32> @mla2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
-;CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: mla2xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mla v2.2s, v0.2s, v1.2s
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <2 x i32> %A, %B;
 	%tmp2 = add <2 x i32> %C, %tmp1;
 	ret <2 x i32> %tmp2
 }
 
 define <4 x i32> @mla4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
-;CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: mla4xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mla v2.4s, v0.4s, v1.4s
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <4 x i32> %A, %B;
 	%tmp2 = add <4 x i32> %C, %tmp1;
 	ret <4 x i32> %tmp2
 }
 
 define <8 x i8> @mls8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
-;CHECK: mls {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: mls8xi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mls v2.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <8 x i8> %A, %B;
 	%tmp2 = sub <8 x i8> %C, %tmp1;
 	ret <8 x i8> %tmp2
 }
 
 define <16 x i8> @mls16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
-;CHECK: mls {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: mls16xi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mls v2.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <16 x i8> %A, %B;
 	%tmp2 = sub <16 x i8> %C, %tmp1;
 	ret <16 x i8> %tmp2
 }
 
 define <4 x i16> @mls4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
-;CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: mls4xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mls v2.4h, v0.4h, v1.4h
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <4 x i16> %A, %B;
 	%tmp2 = sub <4 x i16> %C, %tmp1;
 	ret <4 x i16> %tmp2
 }
 
 define <8 x i16> @mls8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
-;CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: mls8xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mls v2.8h, v0.8h, v1.8h
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <8 x i16> %A, %B;
 	%tmp2 = sub <8 x i16> %C, %tmp1;
 	ret <8 x i16> %tmp2
 }
 
 define <2 x i32> @mls2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
-;CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: mls2xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mls v2.2s, v0.2s, v1.2s
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <2 x i32> %A, %B;
 	%tmp2 = sub <2 x i32> %C, %tmp1;
 	ret <2 x i32> %tmp2
 }
 
 define <4 x i32> @mls4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
-;CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: mls4xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mls v2.4s, v0.4s, v1.4s
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
 	%tmp1 = mul <4 x i32> %A, %B;
 	%tmp2 = sub <4 x i32> %C, %tmp1;
 	ret <4 x i32> %tmp2
 }
diff --git a/llvm/test/CodeGen/AArch64/overeager_mla_fusing.ll b/llvm/test/CodeGen/AArch64/overeager_mla_fusing.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/overeager_mla_fusing.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc %s --mtriple aarch64 -verify-machineinstrs -o - | FileCheck %s
+
+define dso_local void @jsimd_idct_ifast_neon_intrinsic(i8* nocapture readonly %dct_table, i16* nocapture readonly %coef_block, i8** nocapture readonly %output_buf, i32 %output_col) local_unnamed_addr #0 {
+; CHECK-LABEL: jsimd_idct_ifast_neon_intrinsic:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr q0, [x1, #32]
+; CHECK-NEXT:    ldr q1, [x0, #32]
+; CHECK-NEXT:    ldr q2, [x1, #96]
+; CHECK-NEXT:    ldr q3, [x0, #96]
+; CHECK-NEXT:    ldr x8, [x2, #48]
+; CHECK-NEXT:    mul v0.8h, v1.8h, v0.8h
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    mla v1.8h, v3.8h, v2.8h
+; CHECK-NEXT:    mov w9, w3
+; CHECK-NEXT:    str q1, [x8, x9]
+; CHECK-NEXT:    ldr x8, [x2, #56]
+; CHECK-NEXT:    mls v0.8h, v3.8h, v2.8h
+; CHECK-NEXT:    str q0, [x8, x9]
+; CHECK-NEXT:    ret
+entry:
+  %add.ptr5 = getelementptr inbounds i16, i16* %coef_block, i64 16
+  %0 = bitcast i16* %add.ptr5 to <8 x i16>*
+  %1 = load <8 x i16>, <8 x i16>* %0, align 16
+
+  %add.ptr17 = getelementptr inbounds i16, i16* %coef_block, i64 48
+  %2 = bitcast i16* %add.ptr17 to <8 x i16>*
+  %3 = load <8 x i16>, <8 x i16>* %2, align 16
+
+  %add.ptr29 = getelementptr inbounds i8, i8* %dct_table, i64 32
+  %4 = bitcast i8* %add.ptr29 to <8 x i16>*
+  %5 = load <8 x i16>, <8 x i16>* %4, align 16
+
+  %add.ptr41 = getelementptr inbounds i8, i8* %dct_table, i64 96
+  %6 = bitcast i8* %add.ptr41 to <8 x i16>*
+  %7 = load <8 x i16>, <8 x i16>* %6, align 16
+
+  %mul.i966 = mul <8 x i16> %5, %1
+  %mul.i964 = mul <8 x i16> %7, %3
+
+  %add.i961 = add <8 x i16> %mul.i966, %mul.i964
+  %sub.i960 = sub <8 x i16> %mul.i966, %mul.i964
+
+  %idx.ext = zext i32 %output_col to i64
+
+  %arrayidx404 = getelementptr inbounds i8*, i8** %output_buf, i64 6
+  %8 = load i8*, i8** %arrayidx404, align 8
+  %add.ptr406 = getelementptr inbounds i8, i8* %8, i64 %idx.ext
+  %9 = bitcast i8* %add.ptr406 to <8 x i16>*
+  store <8 x i16> %add.i961, <8 x i16>* %9, align 8
+
+  %arrayidx408 = getelementptr inbounds i8*, i8** %output_buf, i64 7
+  %10 = load i8*, i8** %arrayidx408, align 8
+  %add.ptr410 = getelementptr inbounds i8, i8* %10, i64 %idx.ext
+  %11 = bitcast i8* %add.ptr410 to <8 x i16>*
+  store <8 x i16> %sub.i960, <8 x i16>* %11, align 8
+
+  ret void
+}