diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-abs.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-abs.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-abs.ll
@@ -0,0 +1,153 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=aarch64 -passes=slp-vectorizer | FileCheck %s
+
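+; Check that adjacent scalar llvm.abs.* calls over consecutive loads and
+; stores are SLP-vectorized into a single vector llvm.abs.* intrinsic.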
+@a = common global [16 x i8] zeroinitializer, align 8
+
+declare i64 @llvm.abs.i64(i64, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i8  @llvm.abs.i8(i8, i1)
+
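+; 2 x i64: expect a single call to @llvm.abs.v2i64.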
+define void @abs_v2i64() {
+; CHECK-LABEL: @abs_v2i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr @a, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> [[TMP0]], i1 false)
+; CHECK-NEXT:    store <2 x i64> [[TMP1]], ptr @a, align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a0 = load i64, ptr getelementptr inbounds (i64, ptr @a, i64 0), align 8
+  %a1 = load i64, ptr getelementptr inbounds (i64, ptr @a, i64 1), align 8
+  %r0 = call i64 @llvm.abs.i64(i64 %a0, i1 false)
+  %r1 = call i64 @llvm.abs.i64(i64 %a1, i1 false)
+  store i64 %r0, ptr getelementptr inbounds (i64, ptr @a, i64 0), align 8
+  store i64 %r1, ptr getelementptr inbounds (i64, ptr @a, i64 1), align 8
+  ret void
+}
+
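+; 4 x i32: expect a single call to @llvm.abs.v4i32.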
+define void @abs_v4i32() {
+; CHECK-LABEL: @abs_v4i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr @a, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[TMP0]], i1 false)
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr @a, align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a0 = load i32, ptr getelementptr inbounds (i32, ptr @a, i64 0), align 4
+  %a1 = load i32, ptr getelementptr inbounds (i32, ptr @a, i64 1), align 4
+  %a2 = load i32, ptr getelementptr inbounds (i32, ptr @a, i64 2), align 4
+  %a3 = load i32, ptr getelementptr inbounds (i32, ptr @a, i64 3), align 4
+  %r0 = call i32 @llvm.abs.i32(i32 %a0, i1 false)
+  %r1 = call i32 @llvm.abs.i32(i32 %a1, i1 false)
+  %r2 = call i32 @llvm.abs.i32(i32 %a2, i1 false)
+  %r3 = call i32 @llvm.abs.i32(i32 %a3, i1 false)
+  store i32 %r0, ptr getelementptr inbounds (i32, ptr @a, i64 0), align 4
+  store i32 %r1, ptr getelementptr inbounds (i32, ptr @a, i64 1), align 4
+  store i32 %r2, ptr getelementptr inbounds (i32, ptr @a, i64 2), align 4
+  store i32 %r3, ptr getelementptr inbounds (i32, ptr @a, i64 3), align 4
+  ret void
+}
+
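+; 8 x i16: expect a single call to @llvm.abs.v8i16.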
+define void @abs_v8i16() {
+; CHECK-LABEL: @abs_v8i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr @a, align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> [[TMP0]], i1 false)
+; CHECK-NEXT:    store <8 x i16> [[TMP1]], ptr @a, align 2
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a0  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 0), align 2
+  %a1  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 1), align 2
+  %a2  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 2), align 2
+  %a3  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 3), align 2
+  %a4  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 4), align 2
+  %a5  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 5), align 2
+  %a6  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 6), align 2
+  %a7  = load i16, ptr getelementptr inbounds (i16, ptr @a, i64 7), align 2
+  %r0  = call i16 @llvm.abs.i16(i16 %a0, i1 false)
+  %r1  = call i16 @llvm.abs.i16(i16 %a1, i1 false)
+  %r2  = call i16 @llvm.abs.i16(i16 %a2, i1 false)
+  %r3  = call i16 @llvm.abs.i16(i16 %a3, i1 false)
+  %r4  = call i16 @llvm.abs.i16(i16 %a4, i1 false)
+  %r5  = call i16 @llvm.abs.i16(i16 %a5, i1 false)
+  %r6  = call i16 @llvm.abs.i16(i16 %a6, i1 false)
+  %r7  = call i16 @llvm.abs.i16(i16 %a7, i1 false)
+  store i16 %r0,  ptr getelementptr inbounds (i16, ptr @a, i64 0), align 2
+  store i16 %r1,  ptr getelementptr inbounds (i16, ptr @a, i64 1), align 2
+  store i16 %r2,  ptr getelementptr inbounds (i16, ptr @a, i64 2), align 2
+  store i16 %r3,  ptr getelementptr inbounds (i16, ptr @a, i64 3), align 2
+  store i16 %r4,  ptr getelementptr inbounds (i16, ptr @a, i64 4), align 2
+  store i16 %r5,  ptr getelementptr inbounds (i16, ptr @a, i64 5), align 2
+  store i16 %r6,  ptr getelementptr inbounds (i16, ptr @a, i64 6), align 2
+  store i16 %r7,  ptr getelementptr inbounds (i16, ptr @a, i64 7), align 2
+  ret void
+}
+
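+; 16 x i8: expect a single call to @llvm.abs.v16i8.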
+define void @abs_v16i8() {
+; CHECK-LABEL: @abs_v16i8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr @a, align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> [[TMP0]], i1 false)
+; CHECK-NEXT:    store <16 x i8> [[TMP1]], ptr @a, align 1
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a0  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  0), align 1
+  %a1  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  1), align 1
+  %a2  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  2), align 1
+  %a3  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  3), align 1
+  %a4  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  4), align 1
+  %a5  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  5), align 1
+  %a6  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  6), align 1
+  %a7  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  7), align 1
+  %a8  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  8), align 1
+  %a9  = load i8, ptr getelementptr inbounds (i8, ptr @a, i64  9), align 1
+  %a10 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 10), align 1
+  %a11 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 11), align 1
+  %a12 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 12), align 1
+  %a13 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 13), align 1
+  %a14 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 14), align 1
+  %a15 = load i8, ptr getelementptr inbounds (i8, ptr @a, i64 15), align 1
+  %r0  = call i8 @llvm.abs.i8(i8 %a0,  i1 false)
+  %r1  = call i8 @llvm.abs.i8(i8 %a1,  i1 false)
+  %r2  = call i8 @llvm.abs.i8(i8 %a2,  i1 false)
+  %r3  = call i8 @llvm.abs.i8(i8 %a3,  i1 false)
+  %r4  = call i8 @llvm.abs.i8(i8 %a4,  i1 false)
+  %r5  = call i8 @llvm.abs.i8(i8 %a5,  i1 false)
+  %r6  = call i8 @llvm.abs.i8(i8 %a6,  i1 false)
+  %r7  = call i8 @llvm.abs.i8(i8 %a7,  i1 false)
+  %r8  = call i8 @llvm.abs.i8(i8 %a8,  i1 false)
+  %r9  = call i8 @llvm.abs.i8(i8 %a9,  i1 false)
+  %r10 = call i8 @llvm.abs.i8(i8 %a10, i1 false)
+  %r11 = call i8 @llvm.abs.i8(i8 %a11, i1 false)
+  %r12 = call i8 @llvm.abs.i8(i8 %a12, i1 false)
+  %r13 = call i8 @llvm.abs.i8(i8 %a13, i1 false)
+  %r14 = call i8 @llvm.abs.i8(i8 %a14, i1 false)
+  %r15 = call i8 @llvm.abs.i8(i8 %a15, i1 false)
+  store i8 %r0,  ptr getelementptr inbounds (i8, ptr @a, i64  0), align 1
+  store i8 %r1,  ptr getelementptr inbounds (i8, ptr @a, i64  1), align 1
+  store i8 %r2,  ptr getelementptr inbounds (i8, ptr @a, i64  2), align 1
+  store i8 %r3,  ptr getelementptr inbounds (i8, ptr @a, i64  3), align 1
+  store i8 %r4,  ptr getelementptr inbounds (i8, ptr @a, i64  4), align 1
+  store i8 %r5,  ptr getelementptr inbounds (i8, ptr @a, i64  5), align 1
+  store i8 %r6,  ptr getelementptr inbounds (i8, ptr @a, i64  6), align 1
+  store i8 %r7,  ptr getelementptr inbounds (i8, ptr @a, i64  7), align 1
+  store i8 %r8,  ptr getelementptr inbounds (i8, ptr @a, i64  8), align 1
+  store i8 %r9,  ptr getelementptr inbounds (i8, ptr @a, i64  9), align 1
+  store i8 %r10, ptr getelementptr inbounds (i8, ptr @a, i64 10), align 1
+  store i8 %r11, ptr getelementptr inbounds (i8, ptr @a, i64 11), align 1
+  store i8 %r12, ptr getelementptr inbounds (i8, ptr @a, i64 12), align 1
+  store i8 %r13, ptr getelementptr inbounds (i8, ptr @a, i64 13), align 1
+  store i8 %r14, ptr getelementptr inbounds (i8, ptr @a, i64 14), align 1
+  store i8 %r15, ptr getelementptr inbounds (i8, ptr @a, i64 15), align 1
+  ret void
+}