diff --git a/llvm/test/CodeGen/AArch64/avg.ll b/llvm/test/CodeGen/AArch64/avg.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/avg.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @avg_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: avg_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd.8b v0, v0, v0
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+  ret <8 x i8> %r
+}
+
+define <4 x i16> @avg_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: avg_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd.4h v0, v0, v0
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+  ret <4 x i16> %r
+}
+
+define <2 x i32> @avg_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: avg_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd.2s v0, v0, v0
+; CHECK-NEXT:    ret
+  %r = tail call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+  ret <2 x i32> %r
+}
+
+define <16 x i8> @avg_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: avg_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd.16b v0, v0, v0
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+  ret <16 x i8> %r
+}
+
+define <8 x i16> @avg_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: avg_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd.8h v0, v0, v0
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+  ret <8 x i16> %r
+}
+
+define <4 x i32> @avg_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: avg_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd.4s v0, v0, v0
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+  ret <4 x i32> %r
+}
+
+declare <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32>, <4 x i32>)