diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4792,6 +4792,44 @@
 defm USUBW   : SIMDWideThreeVectorBHS<   1,  0b0011, "usubw",
                  BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;
 
+// Additional patterns for [SU]ML[AS]L
+multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
+                                       Instruction INST8B, Instruction INST4H, Instruction INST2S> {
+  def : Pat<(v4i16 (opnode
+                    V64:$Ra,
+                    (v4i16 (extract_subvector
+                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
+                            (i64 0))))),
+             (EXTRACT_SUBREG (v8i16 (INST8B
+                                     (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
+                                     V64:$Rn, V64:$Rm)), dsub)>;
+  def : Pat<(v2i32 (opnode
+                    V64:$Ra,
+                    (v2i32 (extract_subvector
+                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
+                            (i64 0))))),
+             (EXTRACT_SUBREG (v4i32 (INST4H
+                                     (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
+                                     V64:$Rn, V64:$Rm)), dsub)>;
+  def : Pat<(v1i64 (opnode
+                    V64:$Ra,
+                    (v1i64 (extract_subvector
+                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
+                            (i64 0))))),
+             (EXTRACT_SUBREG (v2i64 (INST2S
+                                     (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
+                                     V64:$Rn, V64:$Rm)), dsub)>;
+}
+
+defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_umull,
+     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
+defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_smull,
+     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
+defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_umull,
+     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
+defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_smull,
+     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
+
 // Additional patterns for SMULL and UMULL
 multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
                                    Instruction INST8B, Instruction INST4H, Instruction INST2S> {
diff --git a/llvm/test/CodeGen/AArch64/mla_mls_merge.ll b/llvm/test/CodeGen/AArch64/mla_mls_merge.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/mla_mls_merge.ll
@@ -0,0 +1,205 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s
+
+define <4 x i16> @test_mla0(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: test_mla0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull v2.8h, v2.8b, v3.8b
+; CHECK-NEXT:    umlal v2.8h, v0.8b, v1.8b
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vmull.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %a, <8 x i8> %b)
+  %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %c, <8 x i8> %d)
+  %add.i = add <8 x i16> %vmull.i.i, %vmull.i
+  %shuffle.i = shufflevector <8 x i16> %add.i, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %shuffle.i
+}
+
+
+define <4 x i16> @test_mla1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: test_mla1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull v2.8h, v2.8b, v3.8b
+; CHECK-NEXT:    smlal v2.8h, v0.8b, v1.8b
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vmull.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %a, <8 x i8> %b)
+  %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %c, <8 x i8> %d)
+  %add.i = add <8 x i16> %vmull.i.i, %vmull.i
+  %shuffle.i = shufflevector <8 x i16> %add.i, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %shuffle.i
+}
+
+
+define <2 x i32> @test_mla2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
+; CHECK-LABEL: test_mla2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull v2.4s, v2.4h, v3.4h
+; CHECK-NEXT:    umlal v2.4s, v0.4h, v1.4h
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %b)
+  %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %c, <4 x i16> %d)
+  %add.i = add <4 x i32> %vmull2.i.i, %vmull2.i
+  %shuffle.i = shufflevector <4 x i32> %add.i, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x i32> %shuffle.i
+}
+
+
+define <2 x i32> @test_mla3(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
+; CHECK-LABEL: test_mla3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull v2.4s, v2.4h, v3.4h
+; CHECK-NEXT:    smlal v2.4s, v0.4h, v1.4h
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %b)
+  %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %c, <4 x i16> %d)
+  %add.i = add <4 x i32> %vmull2.i.i, %vmull2.i
+  %shuffle.i = shufflevector <4 x i32> %add.i, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x i32> %shuffle.i
+}
+
+
+define <1 x i64> @test_mla4(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
+; CHECK-LABEL: test_mla4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull v2.2d, v2.2s, v3.2s
+; CHECK-NEXT:    umlal v2.2d, v0.2s, v1.2s
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %b)
+  %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %c, <2 x i32> %d)
+  %add.i = add <2 x i64> %vmull2.i.i, %vmull2.i
+  %shuffle.i = shufflevector <2 x i64> %add.i, <2 x i64> undef, <1 x i32> zeroinitializer
+  ret <1 x i64> %shuffle.i
+}
+
+
+define <1 x i64> @test_mla5(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
+; CHECK-LABEL: test_mla5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull v2.2d, v2.2s, v3.2s
+; CHECK-NEXT:    smlal v2.2d, v0.2s, v1.2s
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %b)
+  %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %c, <2 x i32> %d)
+  %add.i = add <2 x i64> %vmull2.i.i, %vmull2.i
+  %shuffle.i = shufflevector <2 x i64> %add.i, <2 x i64> undef, <1 x i32> zeroinitializer
+  ret <1 x i64> %shuffle.i
+}
+
+
+define <4 x i16> @test_mls0(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: test_mls0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    umlsl v0.8h, v2.8b, v3.8b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %vmull.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %a, <8 x i8> %b)
+  %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %c, <8 x i8> %d)
+  %sub.i = sub <8 x i16> %vmull.i, %vmull.i.i
+  %shuffle.i = shufflevector <8 x i16> %sub.i, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %shuffle.i
+}
+
+
+define <4 x i16> @test_mls1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: test_mls1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    smlsl v0.8h, v2.8b, v3.8b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %vmull.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %a, <8 x i8> %b)
+  %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %c, <8 x i8> %d)
+  %sub.i = sub <8 x i16> %vmull.i, %vmull.i.i
+  %shuffle.i = shufflevector <8 x i16> %sub.i, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %shuffle.i
+}
+
+
+define <2 x i32> @test_mls2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
+; CHECK-LABEL: test_mls2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    umlsl v0.4s, v2.4h, v3.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %b)
+  %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %c, <4 x i16> %d)
+  %sub.i = sub <4 x i32> %vmull2.i, %vmull2.i.i
+  %shuffle.i = shufflevector <4 x i32> %sub.i, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x i32> %shuffle.i
+}
+
+
+define <2 x i32> @test_mls3(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
+; CHECK-LABEL: test_mls3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    smlsl v0.4s, v2.4h, v3.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %b)
+  %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %c, <4 x i16> %d)
+  %sub.i = sub <4 x i32> %vmull2.i, %vmull2.i.i
+  %shuffle.i = shufflevector <4 x i32> %sub.i, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x i32> %shuffle.i
+}
+
+
+define <1 x i64> @test_mls4(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
+; CHECK-LABEL: test_mls4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    umlsl v0.2d, v2.2s, v3.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %b)
+  %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %c, <2 x i32> %d)
+  %sub.i = sub <2 x i64> %vmull2.i, %vmull2.i.i
+  %shuffle.i = shufflevector <2 x i64> %sub.i, <2 x i64> undef, <1 x i32> zeroinitializer
+  ret <1 x i64> %shuffle.i
+}
+
+
+define <1 x i64> @test_mls5(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
+; CHECK-LABEL: test_mls5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    smlsl v0.2d, v2.2s, v3.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %b)
+  %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %c, <2 x i32> %d)
+  %sub.i = sub <2 x i64> %vmull2.i, %vmull2.i.i
+  %shuffle.i = shufflevector <2 x i64> %sub.i, <2 x i64> undef, <1 x i32> zeroinitializer
+  ret <1 x i64> %shuffle.i
+}
+
+declare <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>)
+
+declare <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>)
+
+declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)