diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -563,6 +563,11 @@
 def SVST1W_VNUM_S : MInst<"svst1w_vnum[_{d}]", "vPCld", "l", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
 def SVST1W_VNUM_U : MInst<"svst1w_vnum[_{d}]", "vPGld", "Ul", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
 
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+  def SVST1_BF      : MInst<"svst1[_{d}]",      "vPpd",  "b", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
+  def SVST1_VNUM_BF : MInst<"svst1_vnum[_{d}]", "vPpld", "b", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
+}
+
 // Store one vector (vector base)
 def SVST1_SCATTER_BASES_U : MInst<"svst1_scatter[_{2}base_{d}]", "vPud", "ilUiUlfd", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1_scatter_scalar_offset">;
 def SVST1B_SCATTER_BASES_U : MInst<"svst1b_scatter[_{2}base_{d}]", "vPud", "ilUiUl", [IsScatterStore], MemEltTyInt8, "aarch64_sve_st1_scatter_scalar_offset">;
@@ -654,6 +659,11 @@
 // Store one vector, with no truncation, non-temporal (scalar base, VL displacement)
 def SVSTNT1_VNUM : MInst<"svstnt1_vnum[_{d}]", "vPpld", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
 
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+  def SVSTNT1_BF      : MInst<"svstnt1[_{d}]",      "vPpd",  "b", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+  def SVSTNT1_VNUM_BF : MInst<"svstnt1_vnum[_{d}]", "vPpld", "b", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // Prefetches
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1-bfloat.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1-bfloat.c
new file mode
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1-bfloat.c
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -D__ARM_FEATURE_SVE_BF16 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -D__ARM_FEATURE_SVE_BF16 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data)
+{
+  // CHECK-LABEL: test_svst1_bf16
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %[[PG]], bfloat* %base)
+  // CHECK: ret void
+  // expected-warning@+1 {{implicit declaration of function 'svst1_bf16'}}
+  return SVE_ACLE_FUNC(svst1,_bf16,,)(pg, base, data);
+}
+
+void test_svst1_vnum_bf16(svbool_t pg, bfloat16_t *base, int64_t vnum, svbfloat16_t data)
+{
+  // CHECK-LABEL: test_svst1_vnum_bf16
+  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK-DAG: %[[BASE:.*]] = bitcast bfloat* %base to <vscale x 8 x bfloat>*
+  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %[[BASE]], i64 %vnum, i64 0
+  // CHECK: call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %[[PG]], bfloat* %[[GEP]])
+  // CHECK: ret void
+  // expected-warning@+1 {{implicit declaration of function 'svst1_vnum_bf16'}}
+  return SVE_ACLE_FUNC(svst1_vnum,_bf16,,)(pg, base, vnum, data);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_stnt1-bfloat.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_stnt1-bfloat.c
new file mode
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_stnt1-bfloat.c
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -D__ARM_FEATURE_SVE_BF16 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -D__ARM_FEATURE_SVE_BF16 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+void test_svstnt1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data)
+{
+  // CHECK-LABEL: test_svstnt1_bf16
+  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: call void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %[[PG]], bfloat* %base)
+  // CHECK-NEXT: ret
+  // expected-warning@+1 {{implicit declaration of function 'svstnt1_bf16'}}
+  return SVE_ACLE_FUNC(svstnt1,_bf16,,)(pg, base, data);
+}
+
+void test_svstnt1_vnum_bf16(svbool_t pg, bfloat16_t *base, int64_t vnum, svbfloat16_t data)
+{
+  // CHECK-LABEL: test_svstnt1_vnum_bf16
+  // CHECK-DAG: %[[BITCAST:.*]] = bitcast bfloat* %base to <vscale x 8 x bfloat>*
+  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %[[BITCAST]], i64 %vnum, i64 0
+  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: call void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %[[PG]], bfloat* %[[GEP]])
+  // CHECK-NEXT: ret
+  // expected-warning@+1 {{implicit declaration of function 'svstnt1_vnum_bf16'}}
+  return SVE_ACLE_FUNC(svstnt1_vnum,_bf16,,)(pg, base, vnum, data);
+}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -12037,6 +12037,7 @@
   case MVT::nxv8i8:
   case MVT::nxv8i16:
   case MVT::nxv8f16:
+  case MVT::nxv8bf16:
     return MVT::nxv8i16;
   case MVT::nxv16i8:
     return MVT::nxv16i8;
@@ -12127,6 +12128,11 @@
   EVT HwSrcVt = getSVEContainerType(DataVT);
   SDValue InputVT = DAG.getValueType(DataVT);
 
+  if (DataVT == MVT::nxv8bf16)
+    assert(
+        static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasBF16() &&
+        "Unsupported type (BF16)");
+
   if (DataVT.isFloatingPoint())
     InputVT = DAG.getValueType(HwSrcVt);
 
@@ -12153,6 +12159,11 @@
   EVT DataVT = Data.getValueType();
   EVT PtrTy = N->getOperand(4).getValueType();
 
+  if (DataVT == MVT::nxv8bf16)
+    assert(
+        static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasBF16() &&
+        "Unsupported type (BF16)");
+
   if (DataVT.isFloatingPoint())
     Data = DAG.getNode(ISD::BITCAST, DL, DataVT.changeTypeToInteger(), Data);
 
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1444,6 +1444,7 @@
   def : Pat<(nxv8i16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8i16 ZPR:$src)>;
   def : Pat<(nxv8i16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8i16 ZPR:$src)>;
   def : Pat<(nxv8i16 (bitconvert (nxv8f16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+  def : Pat<(nxv8i16 (bitconvert (nxv8bf16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
   def : Pat<(nxv8i16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8i16 ZPR:$src)>;
   def : Pat<(nxv8i16 (bitconvert (nxv2f64 ZPR:$src))), (nxv8i16 ZPR:$src)>;
 
@@ -1595,6 +1596,10 @@
   defm : pred_store;
   defm : pred_store;
 
+  let Predicates = [HasBF16, HasSVE] in {
+    defm : pred_store;
+  }
+
   // 16-element contiguous stores
   defm : pred_store;
 
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
@@ -126,6 +126,17 @@
   ret void
 }
 
+define void @st1h_bf16_inbound(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, bfloat* %a) #0 {
+; CHECK-LABEL: st1h_bf16_inbound:
+; CHECK: st1h { z0.h }, p0, [x0, #-5, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast bfloat* %a to <vscale x 8 x bfloat>*
+  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base_scalable, i64 -5
+  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to bfloat*
+  call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, bfloat* %base_scalar)
+  ret void
+}
+
 define void @st1h_s_inbound(<vscale x 4 x i16> %data, <vscale x 4 x i1> %pg, i16* %a) {
 ; CHECK-LABEL: st1h_s_inbound:
 ; CHECK: st1h { z0.s }, p0, [x0, #2, mul vl]
@@ -219,6 +230,7 @@
 declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
 declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
 declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
+declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
 
 declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
 declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
@@ -230,3 +242,6 @@
 declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
 declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
 declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll
@@ -82,6 +82,17 @@
   ret void
 }
 
+define void @st1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, bfloat* %a, i64 %index) #0 {
+; CHECK-LABEL: st1h_bf16:
+; CHECK: st1h { z0.h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base = getelementptr bfloat, bfloat* %a, i64 %index
+  call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data,
+                                           <vscale x 8 x i1> %pred,
+                                           bfloat* %base)
+  ret void
+}
+
 define void @st1h_s(<vscale x 4 x i16> %data, <vscale x 4 x i1> %pred, i16* %addr) {
 ; CHECK-LABEL: st1h_s:
 ; CHECK: st1h { z0.s }, p0, [x0]
@@ -174,6 +185,7 @@
 declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
 declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
 declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
+declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
 
 declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
 declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
@@ -185,3 +197,6 @@
 declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
 declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
 declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
@@ -75,6 +75,16 @@
   ret void
 }
 
+define void @st1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
+; CHECK-LABEL: st1h_bf16:
+; CHECK: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data,
+                                           <vscale x 8 x i1> %pred,
+                                           bfloat* %addr)
+  ret void
+}
+
 define void @st1h_s(<vscale x 4 x i16> %data, <vscale x 4 x i1> %pred, i16* %addr) {
 ; CHECK-LABEL: st1h_s:
 ; CHECK: st1h { z0.s }, p0, [x0]
@@ -161,6 +171,7 @@
 declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
 declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
 declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
+declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
 
 declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
 declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
@@ -172,3 +183,6 @@
 declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
 declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
 declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll
@@ -44,7 +44,7 @@
   ret void
 }
 
-define void @st2h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x i1> %pred, bfloat* %addr) {
+define void @st2h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
 ; CHECK-LABEL: st2h_bf16:
 ; CHECK: st2h { z0.h, z1.h }, p0, [x0]
 ; CHECK-NEXT: ret
@@ -151,7 +151,7 @@
   ret void
 }
 
-define void @st3h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x i1> %pred, bfloat* %addr) {
+define void @st3h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
 ; CHECK-LABEL: st3h_bf16:
 ; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0]
 ; CHECK-NEXT: ret
@@ -266,7 +266,7 @@
   ret void
 }
 
-define void @st4h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x i1> %pred, bfloat* %addr) {
+define void @st4h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
 ; CHECK-LABEL: st4h_bf16:
 ; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0]
 ; CHECK-NEXT: ret
@@ -377,6 +377,16 @@
   ret void
 }
 
+define void @stnt1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
+; CHECK-LABEL: stnt1h_bf16:
+; CHECK: stnt1h { z0.h }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat> %data,
+                                             <vscale x 8 x i1> %pred,
+                                             bfloat* %addr)
+  ret void
+}
+
 ;
 ; STNT1W
 ;
@@ -458,5 +468,9 @@
 declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
 declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
 declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
+declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
 declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
 declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
@@ -179,6 +179,14 @@
   ret void
 }
 
+define void @masked_store_nxv8bf16(<vscale x 8 x bfloat> *%a, <vscale x 8 x bfloat> %val, <vscale x 8 x i1> %mask) nounwind #0 {
+; CHECK-LABEL: masked_store_nxv8bf16:
+; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %val, <vscale x 8 x bfloat> *%a, i32 2, <vscale x 8 x i1> %mask)
+  ret void
+}
+
 declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
@@ -203,6 +211,7 @@
 declare void @llvm.masked.store.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>*, i32, <vscale x 4 x i1>)
 declare void @llvm.masked.store.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>*, i32, <vscale x 4 x i1>)
 declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
@@ -513,6 +513,24 @@
   ret void
 }
 
+define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8 x i1> %mask) nounwind #0 {
+; CHECK-LABEL: test_masked_ldst_sv8bf16:
+; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, #2, mul vl]
+; CHECK-NEXT: ret
+  %base_load = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base, i64 -1
+  %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>* %base_load,
+                                                                i32 1,
+                                                                <vscale x 8 x i1> %mask,
+                                                                <vscale x 8 x bfloat> undef)
+  %base_store = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat> * %base, i64 2
+  call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %data,
+                                        <vscale x 8 x bfloat>* %base_store,
+                                        i32 1,
+                                        <vscale x 8 x i1> %mask)
+  ret void
+}
+
 ; 8-lane zero/sign extended contiguous loads.
 define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base, <vscale x 8 x i1> %mask) nounwind {
@@ -596,6 +614,7 @@
 declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8 (<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
 declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
 declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
 
 ; 16-element contiguous loads.
 declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
@@ -620,6 +639,10 @@
 declare void @llvm.masked.store.nxv8i8 (<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>)
 
 ; 16-element contiguous stores.
 declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
@@ -498,6 +498,24 @@
   ret void
 }
 
+define void @test_masked_ldst_sv8bf16(bfloat * %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind #0 {
+; CHECK-LABEL: test_masked_ldst_sv8bf16:
+; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base_f16 = getelementptr bfloat, bfloat* %base, i64 %offset
+  %base_addr = bitcast bfloat* %base_f16 to <vscale x 8 x bfloat>*
+  %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>* %base_addr,
+                                                                i32 1,
+                                                                <vscale x 8 x i1> %mask,
+                                                                <vscale x 8 x bfloat> undef)
+  call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %data,
+                                        <vscale x 8 x bfloat>* %base_addr,
+                                        i32 1,
+                                        <vscale x 8 x i1> %mask)
+  ret void
+}
+
 ; 8-lane zero/sign extended contiguous loads.
 define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(i8* %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
@@ -584,6 +602,7 @@
 declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8 (<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
 declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
 declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
 
 ; 16-element contiguous loads.
 declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
@@ -608,6 +627,10 @@
 declare void @llvm.masked.store.nxv8i8 (<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>)
 
 ; 16-element contiguous stores.
 declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
@@ -139,6 +139,23 @@
   ret void
 }
 
+define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8 x i1> %mask) nounwind #0 {
+; CHECK-LABEL: test_masked_ldst_sv8bf16:
+; CHECK-NEXT: ldnt1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: stnt1h { z[[DATA]].h }, p0, [x0, #2, mul vl]
+; CHECK-NEXT: ret
+  %base_load = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base, i64 -1
+  %base_load_bc = bitcast <vscale x 8 x bfloat>* %base_load to bfloat*
+  %data = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1> %mask,
+                                                                      bfloat* %base_load_bc)
+  %base_store = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat> * %base, i64 2
+  %base_store_bc = bitcast <vscale x 8 x bfloat>* %base_store to bfloat*
+  call void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat> %data,
+                                             <vscale x 8 x i1> %mask,
+                                             bfloat* %base_store_bc)
+  ret void
+}
+
 ; 16-lane non-temporal load/stores.
 define void @test_masked_ldst_sv16i8(<vscale x 16 x i8> * %base, <vscale x 16 x i1> %mask) nounwind {
@@ -169,6 +186,7 @@
 ; 8-element non-temporal loads.
 declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, i16*)
 declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, half*)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
 
 ; 16-element non-temporal loads.
 declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, i8*)
@@ -176,15 +194,18 @@
 ; 2-element non-temporal stores.
 declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
 declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
-
-; 4-element non-temporal stores.
+
+; 4-element non-temporal stores.
 declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
 declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
-
-; 8-element non-temporal stores.
+
+; 8-element non-temporal stores.
 declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
 declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
+declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
 
 ; 16-element non-temporal stores.
 declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
 
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll
--- a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll
@@ -94,6 +94,20 @@
   ret void
 }
 
+define void @test_masked_ldst_sv8bf16(bfloat* %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind #0 {
+; CHECK-LABEL: test_masked_ldst_sv8bf16:
+; CHECK-NEXT: ldnt1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: stnt1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %gep = getelementptr bfloat, bfloat* %base, i64 %offset
+  %data = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1> %mask,
+                                                                      bfloat* %gep)
+  call void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat> %data,
+                                             <vscale x 8 x i1> %mask,
+                                             bfloat* %gep)
+  ret void
+}
+
 ; 16-lane non-temporal load/stores.
 define void @test_masked_ldst_sv16i8(i8* %base, <vscale x 16 x i1> %mask, i64 %offset) nounwind {
@@ -121,6 +135,7 @@
 ; 8-element non-temporal loads.
 declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, i16*)
 declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, half*)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
 
 ; 16-element non-temporal loads.
 declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, i8*)
@@ -128,14 +143,18 @@
 ; 2-element non-temporal stores.
 declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
 declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
-
-; 4-element non-temporal stores.
+
+; 4-element non-temporal stores.
 declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
 declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
-
-; 8-element non-temporal stores.
+
+; 8-element non-temporal stores.
 declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
 declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
+declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
 
 ; 16-element non-temporal stores.
 declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }