diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -497,6 +497,11 @@
   // v8.3-A Floating-point complex add
   def int_aarch64_neon_vcadd_rot90 : AdvSIMD_2VectorArg_Intrinsic;
   def int_aarch64_neon_vcadd_rot270 : AdvSIMD_2VectorArg_Intrinsic;
+
+  def int_aarch64_neon_vcmla : AdvSIMD_3VectorArg_Intrinsic;
+  def int_aarch64_neon_vcmla_rot90 : AdvSIMD_3VectorArg_Intrinsic;
+  def int_aarch64_neon_vcmla_rot180 : AdvSIMD_3VectorArg_Intrinsic;
+  def int_aarch64_neon_vcmla_rot270 : AdvSIMD_3VectorArg_Intrinsic;
 }

 let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -920,17 +920,55 @@
             (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
   def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
             (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
+
+  def : Pat<(v4f16 (int_aarch64_neon_vcmla (v4f16 V64:$Rd), (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
+            (FCMLAv4f16 (v4f16 V64:$Rd), (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
+  def : Pat<(v4f16 (int_aarch64_neon_vcmla_rot90 (v4f16 V64:$Rd), (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
+            (FCMLAv4f16 (v4f16 V64:$Rd), (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
+  def : Pat<(v4f16 (int_aarch64_neon_vcmla_rot180 (v4f16 V64:$Rd), (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
+            (FCMLAv4f16 (v4f16 V64:$Rd), (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 2))>;
+  def : Pat<(v4f16 (int_aarch64_neon_vcmla_rot270 (v4f16 V64:$Rd), (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
+            (FCMLAv4f16 (v4f16 V64:$Rd), (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 3))>;
+
+  def : Pat<(v8f16 (int_aarch64_neon_vcmla (v8f16 V128:$Rd), (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
+            (FCMLAv8f16 (v8f16 V128:$Rd), (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
+  def : Pat<(v8f16 (int_aarch64_neon_vcmla_rot90 (v8f16 V128:$Rd), (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
+            (FCMLAv8f16 (v8f16 V128:$Rd), (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
+  def : Pat<(v8f16 (int_aarch64_neon_vcmla_rot180 (v8f16 V128:$Rd), (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
+            (FCMLAv8f16 (v8f16 V128:$Rd), (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 2))>;
+  def : Pat<(v8f16 (int_aarch64_neon_vcmla_rot270 (v8f16 V128:$Rd), (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
+            (FCMLAv8f16 (v8f16 V128:$Rd), (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 3))>;
 }
+
 let Predicates = [HasComplxNum, HasNEON] in {
   def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
             (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
   def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
             (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
+
+  def : Pat<(v2f32 (int_aarch64_neon_vcmla (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
+            (FCMLAv2f32 (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
+  def : Pat<(v2f32 (int_aarch64_neon_vcmla_rot90 (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
+            (FCMLAv2f32 (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
+  def : Pat<(v2f32 (int_aarch64_neon_vcmla_rot180 (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
+            (FCMLAv2f32 (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 2))>;
+  def : Pat<(v2f32 (int_aarch64_neon_vcmla_rot270 (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
+            (FCMLAv2f32 (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 3))>;
+
   foreach Ty = [v4f32, v2f64] in {
     def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
               (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
     def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
               (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
+
+    def : Pat<(Ty (int_aarch64_neon_vcmla (Ty V128:$Rd), (Ty V128:$Rn), (Ty V128:$Rm))),
+              (!cast<Instruction>("FCMLA"#Ty) (Ty V128:$Rd), (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
+    def : Pat<(Ty (int_aarch64_neon_vcmla_rot90 (Ty V128:$Rd), (Ty V128:$Rn), (Ty V128:$Rm))),
+              (!cast<Instruction>("FCMLA"#Ty) (Ty V128:$Rd), (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
+    def : Pat<(Ty (int_aarch64_neon_vcmla_rot180 (Ty V128:$Rd), (Ty V128:$Rn), (Ty V128:$Rm))),
+              (!cast<Instruction>("FCMLA"#Ty) (Ty V128:$Rd), (Ty V128:$Rn), (Ty V128:$Rm), (i32 2))>;
+    def : Pat<(Ty (int_aarch64_neon_vcmla_rot270 (Ty V128:$Rd), (Ty V128:$Rn), (Ty V128:$Rm))),
+              (!cast<Instruction>("FCMLA"#Ty) (Ty V128:$Rd), (Ty V128:$Rn), (Ty V128:$Rm), (i32 3))>;
   }
 }

diff --git a/llvm/test/CodeGen/AArch64/neon-vcmla.ll b/llvm/test/CodeGen/AArch64/neon-vcmla.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/neon-vcmla.ll
@@ -0,0 +1,183 @@
+; RUN: llc %s -mtriple=aarch64 -mattr=+v8.3a,+fullfp16 -o - | FileCheck %s
+
+define <4 x half> @test_16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+entry:
+; CHECK-LABEL: test_16x4
+; CHECK-DAG: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, #0
+  %res = tail call <4 x half> @llvm.aarch64.neon.vcmla.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
+  ret <4 x half> %res
+}
+
+define <4 x half> @test_rot90_16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+entry:
+; CHECK-LABEL: test_rot90_16x4
+; CHECK-DAG: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, #90
+  %res = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot90.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
+  ret <4 x half> %res
+}
+
+define <4 x half> @test_rot180_16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+entry:
+; CHECK-LABEL: test_rot180_16x4
+; CHECK-DAG: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, #180
+  %res = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot180.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
+  ret <4 x half> %res
+}
+
+define <4 x half> @test_rot270_16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+entry:
+; CHECK-LABEL: test_rot270_16x4
+; CHECK-DAG: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, #270
+  %res = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot270.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
+  ret <4 x half> %res
+}
+
+
+define <2 x float> @test_32x2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+entry:
+; CHECK-LABEL: test_32x2
+; CHECK-DAG: fcmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #0
+  %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+  ret <2 x float> %res
+}
+
+define <2 x float> @test_rot90_32x2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+entry:
+; CHECK-LABEL: test_rot90_32x2
+; CHECK-DAG: fcmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #90
+  %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.rot90.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+  ret <2 x float> %res
+}
+
+define <2 x float> @test_rot180_32x2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+entry:
+; CHECK-LABEL: test_rot180_32x2
+; CHECK-DAG: fcmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #180
+  %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.rot180.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+  ret <2 x float> %res
+}
+
+define <2 x float> @test_rot270_32x2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+entry:
+; CHECK-LABEL: test_rot270_32x2
+; CHECK-DAG: fcmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #270
+  %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.rot270.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+  ret <2 x float> %res
+}
+
+define <8 x half> @test_16x8(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+entry:
+; CHECK-LABEL: test_16x8
+; CHECK-DAG: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, #0
+  %res = tail call <8 x half> @llvm.aarch64.neon.vcmla.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c)
+  ret <8 x half> %res
+}
+
+define <8 x half> @test_rot90_16x8(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+entry:
+; CHECK-LABEL: test_rot90_16x8
+; CHECK-DAG: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, #90
+  %res = tail call <8 x half> @llvm.aarch64.neon.vcmla.rot90.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c)
+  ret <8 x half> %res
+}
+
+define <8 x half> @test_rot180_16x8(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+entry:
+; CHECK-LABEL: test_rot180_16x8
+; CHECK-DAG: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, #180
+  %res = tail call <8 x half> @llvm.aarch64.neon.vcmla.rot180.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c)
+  ret <8 x half> %res
+}
+
+define <8 x half> @test_rot270_16x8(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+entry:
+; CHECK-LABEL: test_rot270_16x8
+; CHECK-DAG: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, #270
+  %res = tail call <8 x half> @llvm.aarch64.neon.vcmla.rot270.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c)
+  ret <8 x half> %res
+}
+
+define <4 x float> @test_32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+entry:
+; CHECK-LABEL: test_32x4
+; CHECK-DAG: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, #0
+  %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_rot90_32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+entry:
+; CHECK-LABEL: test_rot90_32x4
+; CHECK-DAG: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, #90
+  %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot90.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_rot180_32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+entry:
+; CHECK-LABEL: test_rot180_32x4
+; CHECK-DAG: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, #180
+  %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot180.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_rot270_32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+entry:
+; CHECK-LABEL: test_rot270_32x4
+; CHECK-DAG: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, #270
+  %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot270.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
+  ret <4 x float> %res
+}
+
+define <2 x double> @test_64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+entry:
+; CHECK-LABEL: test_64x2
+; CHECK-DAG: fcmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, #0
+  %res = tail call <2 x double> @llvm.aarch64.neon.vcmla.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_rot90_64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+entry:
+; CHECK-LABEL: test_rot90_64x2
+; CHECK-DAG: fcmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, #90
+  %res = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot90.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_rot180_64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+entry:
+; CHECK-LABEL: test_rot180_64x2
+; CHECK-DAG: fcmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, #180
+  %res = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot180.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_rot270_64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+entry:
+; CHECK-LABEL: test_rot270_64x2
+; CHECK-DAG: fcmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, #270
+  %res = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot270.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+  ret <2 x double> %res
+}
+
+declare <4 x half> @llvm.aarch64.neon.vcmla.v4f16(<4 x half>, <4 x half>, <4 x half>)
+declare <4 x half> @llvm.aarch64.neon.vcmla.rot90.v4f16(<4 x half>, <4 x half>, <4 x half>)
+declare <4 x half> @llvm.aarch64.neon.vcmla.rot180.v4f16(<4 x half>, <4 x half>, <4 x half>)
+declare <4 x half> @llvm.aarch64.neon.vcmla.rot270.v4f16(<4 x half>, <4 x half>, <4 x half>)
+declare <8 x half> @llvm.aarch64.neon.vcmla.v8f16(<8 x half>, <8 x half>, <8 x half>)
+declare <8 x half> @llvm.aarch64.neon.vcmla.rot90.v8f16(<8 x half>, <8 x half>, <8 x half>)
+declare <8 x half> @llvm.aarch64.neon.vcmla.rot180.v8f16(<8 x half>, <8 x half>, <8 x half>)
+declare <8 x half> @llvm.aarch64.neon.vcmla.rot270.v8f16(<8 x half>, <8 x half>, <8 x half>)
+declare <2 x float> @llvm.aarch64.neon.vcmla.v2f32(<2 x float>, <2 x float>, <2 x float>)
+declare <2 x float> @llvm.aarch64.neon.vcmla.rot90.v2f32(<2 x float>, <2 x float>, <2 x float>)
+declare <2 x float> @llvm.aarch64.neon.vcmla.rot180.v2f32(<2 x float>, <2 x float>, <2 x float>)
+declare <2 x float> @llvm.aarch64.neon.vcmla.rot270.v2f32(<2 x float>, <2 x float>, <2 x float>)
+declare <4 x float> @llvm.aarch64.neon.vcmla.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <4 x float> @llvm.aarch64.neon.vcmla.rot90.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <4 x float> @llvm.aarch64.neon.vcmla.rot180.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <4 x float> @llvm.aarch64.neon.vcmla.rot270.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <2 x double> @llvm.aarch64.neon.vcmla.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <2 x double> @llvm.aarch64.neon.vcmla.rot90.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <2 x double> @llvm.aarch64.neon.vcmla.rot180.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <2 x double> @llvm.aarch64.neon.vcmla.rot270.v2f64(<2 x double>, <2 x double>, <2 x double>)
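
Note on semantics, outside the patch itself: each intrinsic above is selected to a single FCMLA with the corresponding rotation immediate, so none of them computes a full complex product on its own. As a hedged sketch, assuming the operand order (accumulator, lhs, rhs) used in the tests above, chaining the #0 and #90 rotations gives a complex multiply-accumulate acc += lhs * rhs. The function name @cmla_mul_accum_32x2 below is purely illustrative:

; Sketch only (not part of the patch): the #0 step contributes
; lhs.re*rhs.re to the real lane and lhs.re*rhs.im to the imaginary lane,
; and the #90 step contributes -lhs.im*rhs.im and lhs.im*rhs.re, so the
; two together produce (lhs.re*rhs.re - lhs.im*rhs.im,
; lhs.re*rhs.im + lhs.im*rhs.re) added onto the accumulator.
declare <2 x float> @llvm.aarch64.neon.vcmla.v2f32(<2 x float>, <2 x float>, <2 x float>)
declare <2 x float> @llvm.aarch64.neon.vcmla.rot90.v2f32(<2 x float>, <2 x float>, <2 x float>)

define <2 x float> @cmla_mul_accum_32x2(<2 x float> %acc, <2 x float> %lhs, <2 x float> %rhs) {
entry:
  ; acc += lhs.re * rhs  (rotation #0)
  %t = tail call <2 x float> @llvm.aarch64.neon.vcmla.v2f32(<2 x float> %acc, <2 x float> %lhs, <2 x float> %rhs)
  ; ... += i * lhs.im * rhs  (rotation #90), completing acc + lhs * rhs
  %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.rot90.v2f32(<2 x float> %t, <2 x float> %lhs, <2 x float> %rhs)
  ret <2 x float> %res
}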