Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1153,8 +1153,10 @@
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
     }
 
-    for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16})
+    for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
+      setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
+    }
 
     setOperationAction(ISD::SPLAT_VECTOR, MVT::nxv8bf16, Custom);
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1196,6 +1196,10 @@
             (UUNPKLO_ZZ_D ZPR:$Zs)>;
   def : Pat<(nxv2bf16 (extract_subvector (nxv4bf16 ZPR:$Zs), (i64 2))),
             (UUNPKHI_ZZ_D ZPR:$Zs)>;
+  def : Pat<(nxv4bf16 (extract_subvector (nxv8bf16 ZPR:$Zs), (i64 0))),
+            (UUNPKLO_ZZ_S ZPR:$Zs)>;
+  def : Pat<(nxv4bf16 (extract_subvector (nxv8bf16 ZPR:$Zs), (i64 4))),
+            (UUNPKHI_ZZ_S ZPR:$Zs)>;
 }
 
 def : Pat<(nxv4f16 (extract_subvector (nxv8f16 ZPR:$Zs), (i64 0))),
Index: llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
@@ -48,6 +48,16 @@
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -125,6 +135,16 @@
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(bfloat* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(float* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -150,10 +170,13 @@
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
Index: llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
@@ -63,6 +63,17 @@
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -169,6 +180,17 @@
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -208,6 +230,7 @@
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
@@ -215,4 +238,6 @@
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
Index: llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
@@ -52,6 +52,17 @@
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -136,6 +147,17 @@
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(bfloat* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(float* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -163,10 +185,13 @@
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
Index: llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
@@ -68,6 +68,18 @@
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -183,6 +195,18 @@
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -225,6 +249,7 @@
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
@@ -232,4 +257,6 @@
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
Index: llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
@@ -44,6 +44,16 @@
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -90,5 +100,7 @@
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+attributes #0 = { "target-features"="+sve,+bf16" }
Index: llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
@@ -59,6 +59,17 @@
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -121,5 +132,7 @@
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+attributes #0 = { "target-features"="+sve,+bf16" }
Index: llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
@@ -25,6 +25,16 @@
   ret void
 }
 
+define void @masked_scatter_nxv8bf16(<vscale x 8 x bfloat> %data, bfloat* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
+; CHECK-LABEL: masked_scatter_nxv8bf16
+; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
+; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
+; CHECK: ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 8 x i16> %offsets
+  call void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x bfloat*> %ptrs, i32 1, <vscale x 8 x i1> %mask)
+  ret void
+}
+
 define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, float* %base, <vscale x 8 x i32> %indexes, <vscale x 8 x i1> %masks) {
 ; CHECK-LABEL: masked_scatter_nxv8f32
 ; CHECK-DAG: st1w { z0.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, uxtw #2]
@@ -56,4 +66,6 @@
 declare void @llvm.masked.scatter.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8*>, i32, <vscale x 16 x i1>)
 declare void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16*>, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float*>, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat*>, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32*>, i32, <vscale x 32 x i1>)
+attributes #0 = { "target-features"="+sve,+bf16" }