diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5653,6 +5653,25 @@
 defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
 defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
 
+// Patterns for uaddv(uaddlp(x)) ==> uaddlv
+def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
+            (v4i16 (AArch64uaddv (v4i16 (AArch64uaddlp (v8i8 V64:$op))))),
+            (i64 0))), (i64 0))),
+          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
+            (UADDLVv4i16v V64:$op), ssub), ssub)>;
+def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (AArch64uaddlp
+           (v16i8 V128:$op))))), (i64 0))),
+          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
+            (UADDLVv16i8v V128:$op), hsub), ssub)>;
+def : Pat<(v4i32 (AArch64uaddv (v4i32 (AArch64uaddlp (v8i16 V128:$op))))),
+          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (UADDLVv8i16v V128:$op), ssub)>;
+
+// Patterns for addp(uaddlp(x))) ==> uaddlv
+def : Pat<(v2i32 (AArch64uaddv (v2i32 (AArch64uaddlp (v4i16 V64:$op))))),
+          (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (UADDLVv4i16v V64:$op), ssub)>;
+def : Pat<(v2i64 (AArch64uaddv (v2i64 (AArch64uaddlp (v4i32 V128:$op))))),
+          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (UADDLVv4i32v V128:$op), dsub)>;
+
 // Patterns for across-vector intrinsics, that have a node equivalent, that
 // returns a vector (with only the low lane defined) instead of a scalar.
 // In effect, opNode is the same as (scalar_to_vector (IntNode)).
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -220,8 +220,7 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uabdl.8h v2, v0, v1
 ; CHECK-NEXT:    uabal2.8h v2, v0, v1
-; CHECK-NEXT:    uaddlp.4s v0, v2
-; CHECK-NEXT:    addv.4s s0, v0
+; CHECK-NEXT:    uaddlv.8h s0, v2
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %aext = zext <16 x i8> %a to <16 x i32>
@@ -239,8 +238,7 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sabdl.8h v2, v0, v1
 ; CHECK-NEXT:    sabal2.8h v2, v0, v1
-; CHECK-NEXT:    uaddlp.4s v0, v2
-; CHECK-NEXT:    addv.4s s0, v0
+; CHECK-NEXT:    uaddlv.8h s0, v2
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %aext = sext <16 x i8> %a to <16 x i32>
diff --git a/llvm/test/CodeGen/AArch64/neon-sad.ll b/llvm/test/CodeGen/AArch64/neon-sad.ll
--- a/llvm/test/CodeGen/AArch64/neon-sad.ll
+++ b/llvm/test/CodeGen/AArch64/neon-sad.ll
@@ -11,8 +11,7 @@
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uabdl v2.8h, v1.8b, v0.8b
 ; CHECK-NEXT:    uabal2 v2.8h, v1.16b, v0.16b
-; CHECK-NEXT:    uaddlp v0.4s, v2.8h
-; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    uaddlv s0, v2.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
@@ -35,8 +34,7 @@
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sabdl v2.8h, v1.8b, v0.8b
 ; CHECK-NEXT:    sabal2 v2.8h, v1.16b, v0.16b
-; CHECK-NEXT:    uaddlp v0.4s, v2.8h
-; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    uaddlv s0, v2.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/neon-uaddlv.ll b/llvm/test/CodeGen/AArch64/neon-uaddlv.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/neon-uaddlv.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple aarch64-none-linux-gnu < %s | FileCheck %s
+
+declare <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16>) nounwind readnone
+
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) nounwind readnone
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) nounwind readnone
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) nounwind readnone
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) nounwind readnone
+declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) nounwind readnone
+
+define i16 @uaddlv4h_from_v8i8(<8 x i8>* %A) nounwind {
+; CHECK-LABEL: uaddlv4h_from_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uaddlv s0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
+  %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3)
+  ret i16 %tmp5
+}
+
+define i16 @uaddlv16b_from_v16i8(<16 x i8>* %A) nounwind {
+; CHECK-LABEL: uaddlv16b_from_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uaddlv h0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, <16 x i8>* %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
+  %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3)
+  ret i16 %tmp5
+}
+
+define i32 @uaddlv8h_from_v8i16(<8 x i16>* %A) nounwind {
+; CHECK-LABEL: uaddlv8h_from_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uaddlv s0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
+  %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3)
+  ret i32 %tmp5
+}
+
+define i64 @uaddlv4s_from_v4i32(<4 x i32>* %A) nounwind {
+; CHECK-LABEL: uaddlv4s_from_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uaddlv d0, v0.4s
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
+  %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3)
+  ret i64 %tmp5
+}
+
+define i32 @uaddlv4h_from_v4i16(<4 x i16>* %A) nounwind {
+; CHECK-LABEL: uaddlv4h_from_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uaddlv s0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
+  %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3)
+  ret i32 %tmp5
+}
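For context only (not part of the patch): a minimal C sketch of source code that should benefit from these patterns. It assumes an AArch64 compiler with <arm_neon.h>; the function name is illustrative. vpaddlq_u16 maps to the pairwise widening add (uaddlp) and vaddvq_u32 to the across-lanes add reduction, so after this change the pair should fold into a single uaddlv instead of uaddlp followed by addv, as in the updated neon-sad.ll checks above.

#include <arm_neon.h>
#include <stdint.h>

// Illustrative sketch: pairwise-widen a v8i16 then reduce the resulting v4i32.
// Before this patch the selected code was "uaddlp v0.4s, v0.8h" followed by
// "addv s0, v0.4s"; with the new v4i32 (AArch64uaddv (AArch64uaddlp v8i16))
// pattern it should select a single "uaddlv s0, v0.8h".
uint32_t sum_u16x8(uint16x8_t v) {
  uint32x4_t pairs = vpaddlq_u16(v); // pairwise widening add
  return vaddvq_u32(pairs);          // across-lanes add reduction
}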