Index: llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-merging.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-merging.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-merging.ll
@@ -4,6 +4,18 @@
 ; FADD
 ;
 
+define <vscale x 8 x half> @fadd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fadd_h:
+; CHECK: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
 define <vscale x 4 x float> @fadd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fadd_s:
 ; CHECK: movprfx z0.s, p0/z, z0.s
@@ -32,6 +44,18 @@
 ; FMAX
 ;
 
+define <vscale x 8 x half> @fmax_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmax_h:
+; CHECK: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
 define <vscale x 4 x float> @fmax_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fmax_s:
 ; CHECK: movprfx z0.s, p0/z, z0.s
@@ -60,6 +84,18 @@
 ; FMAXNM
 ;
 
+define <vscale x 8 x half> @fmaxnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmaxnm_h:
+; CHECK: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                   <vscale x 8 x half> %a_z,
+                                                                   <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
 define <vscale x 4 x float> @fmaxnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fmaxnm_s:
 ; CHECK: movprfx z0.s, p0/z, z0.s
@@ -88,6 +124,18 @@
 ; FMIN
 ;
 
+define <vscale x 8 x half> @fmin_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmin_h:
+; CHECK: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
 define <vscale x 4 x float> @fmin_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fmin_s:
 ; CHECK: movprfx z0.s, p0/z, z0.s
@@ -116,6 +164,18 @@
 ; FMINNM
 ;
 
+define <vscale x 8 x half> @fminnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fminnm_h:
+; CHECK: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                   <vscale x 8 x half> %a_z,
+                                                                   <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
 define <vscale x 4 x float> @fminnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fminnm_s:
 ; CHECK: movprfx z0.s, p0/z, z0.s
@@ -144,6 +204,18 @@
 ; FMUL
 ;
 
+define <vscale x 8 x half> @fmul_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmul_h:
+; CHECK: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
 define <vscale x 4 x float> @fmul_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fmul_s:
 ; CHECK: movprfx z0.s, p0/z, z0.s
@@ -172,6 +244,18 @@
 ; FSUB
 ;
 
+define <vscale x 8 x half> @fsub_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fsub_h:
+; CHECK: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                 <vscale x 8 x half> %a_z,
+                                                                 <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
 define <vscale x 4 x float> @fsub_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fsub_s:
 ; CHECK: movprfx z0.s, p0/z, z0.s
@@ -200,6 +284,18 @@
 ; FSUBR
 ;
 
+define <vscale x 8 x half> @fsubr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fsubr_h:
+; CHECK: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                  <vscale x 8 x half> %a_z,
+                                                                  <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
 define <vscale x 4 x float> @fsubr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fsubr_s:
 ; CHECK: movprfx z0.s, p0/z, z0.s
@@ -224,38 +320,50 @@
   ret <vscale x 2 x double> %out
 }
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fdivr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fdivr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fdivr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fmulx.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fmulx.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fmulx.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
 
+declare <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)