llvm/test/CodeGen/AArch64/sme2-intrinsics-fp-dots.ll
(158 lines not shown)
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
ret void
}
; == Multi, indexed (16-bit float) == | |||||
define void @fdot_lane_za32_f16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2) #0 { | |||||
; CHECK-LABEL: fdot_lane_za32_f16_vg1x2: | |||||
; CHECK: // %bb.0: | |||||
; CHECK-NEXT: mov w8, w0 | |||||
; CHECK-NEXT: mov z5.d, z2.d | |||||
; CHECK-NEXT: mov z4.d, z1.d | |||||
; CHECK-NEXT: fdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3] | |||||
; CHECK-NEXT: fdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3] | |||||
; CHECK-NEXT: ret | |||||
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, i32 3) | |||||
%slice2 = add i32 %slice, 7 | |||||
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, i32 3) | |||||
ret void | |||||
} | |||||
define void @fdot_lane_za32_f16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) #0 { | |||||
; CHECK-LABEL: fdot_lane_za32_f16_vg1x4: | |||||
; CHECK: // %bb.0: | |||||
; CHECK-NEXT: mov z27.d, z4.d | |||||
; CHECK-NEXT: mov w8, w0 | |||||
; CHECK-NEXT: mov z26.d, z3.d | |||||
; CHECK-NEXT: mov z25.d, z2.d | |||||
; CHECK-NEXT: mov z24.d, z1.d | |||||
; CHECK-NEXT: fdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3] | |||||
; CHECK-NEXT: fdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3] | |||||
; CHECK-NEXT: ret | |||||
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, | |||||
<vscale x 8 x half> %zn4, i32 3) | |||||
%slice2 = add i32 %slice, 7 | |||||
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, | |||||
<vscale x 8 x half> %zn4, i32 3) | |||||
ret void | |||||
} | |||||
; == Multi, indexed (16-bit bfloat) == | |||||
define void @bfdot_lane_za32_bf16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2) #0 { | |||||
; CHECK-LABEL: bfdot_lane_za32_bf16_vg1x2: | |||||
; CHECK: // %bb.0: | |||||
; CHECK-NEXT: mov w8, w0 | |||||
; CHECK-NEXT: mov z5.d, z2.d | |||||
; CHECK-NEXT: mov z4.d, z1.d | |||||
; CHECK-NEXT: bfdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3] | |||||
; CHECK-NEXT: bfdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3] | |||||
; CHECK-NEXT: ret | |||||
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, i32 3) | |||||
%slice2 = add i32 %slice, 7 | |||||
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, i32 3) | |||||
ret void | |||||
} | |||||
define void @bfdot_lane_za32_bf16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) #0 { | |||||
; CHECK-LABEL: bfdot_lane_za32_bf16_vg1x4: | |||||
; CHECK: // %bb.0: | |||||
; CHECK-NEXT: mov z27.d, z4.d | |||||
; CHECK-NEXT: mov w8, w0 | |||||
; CHECK-NEXT: mov z26.d, z3.d | |||||
; CHECK-NEXT: mov z25.d, z2.d | |||||
; CHECK-NEXT: mov z24.d, z1.d | |||||
; CHECK-NEXT: bfdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3] | |||||
; CHECK-NEXT: bfdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3] | |||||
; CHECK-NEXT: ret | |||||
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, | |||||
<vscale x 8 x bfloat> %zn4, i32 3) | |||||
%slice2 = add i32 %slice, 7 | |||||
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, | |||||
<vscale x 8 x bfloat> %zn4, i32 3) | |||||
ret void | |||||
} | |||||
attributes #0 = { nounwind "target-features"="+sme2" }
; == Multi, multi (16-bit float)
declare void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>,
<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
(9 lines not shown)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
; == Multi, single (16-bit bfloat)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
; == Multi, indexed (16-bit float) | |||||
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32) | |||||
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32) | |||||
; == Multi, indexed (16-bit bfloat) | |||||
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32) | |||||
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32) |