diff --git a/llvm/lib/Target/ARM/ARMInstrNEON.td b/llvm/lib/Target/ARM/ARMInstrNEON.td
--- a/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -2481,7 +2481,7 @@
                                    MVT::i32);
 }]>;
 
-// Extract S sub-registers of Q/D registers containing a given f16 lane.
+// Extract S sub-registers of Q/D registers containing a given f16/bf16 lane.
 def SSubReg_f16_reg : SDNodeXForm<imm, [{
   assert(ARM::ssub_3 == ARM::ssub_0 + 3 && "Unexpected subreg numbering");
   return CurDAG->getTargetConstant(ARM::ssub_0 + N->getZExtValue()/2, SDLoc(N),
@@ -6407,29 +6407,57 @@
 def imm_even : ImmLeaf<i32, [{ return (Imm & 1) == 0; }]>;
 def imm_odd : ImmLeaf<i32, [{ return (Imm & 1) == 1; }]>;
 
-let Predicates = [HasNEON] in {
-def : Pat<(extractelt (v4f16 DPR:$src), imm_even:$lane),
-          (EXTRACT_SUBREG
-            (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
-            (SSubReg_f16_reg imm_even:$lane))>;
+multiclass ExtractEltEvenF16<ValueType VT4, ValueType VT8> {
+  def : Pat<(extractelt (VT4 DPR:$src), imm_even:$lane),
+            (EXTRACT_SUBREG
+              (v2f32 (COPY_TO_REGCLASS (VT4 DPR:$src), DPR_VFP2)),
+              (SSubReg_f16_reg imm_even:$lane))>;
+  def : Pat<(extractelt (VT8 QPR:$src), imm_even:$lane),
+            (EXTRACT_SUBREG
+              (v4f32 (COPY_TO_REGCLASS (VT8 QPR:$src), QPR_VFP2)),
+              (SSubReg_f16_reg imm_even:$lane))>;
+}
 
-def : Pat<(extractelt (v4f16 DPR:$src), imm_odd:$lane),
+multiclass ExtractEltOddF16VMOVH<ValueType VT4, ValueType VT8> {
+  def : Pat<(extractelt (VT4 DPR:$src), imm_odd:$lane),
             (COPY_TO_REGCLASS
               (VMOVH (EXTRACT_SUBREG
-                (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
-                (SSubReg_f16_reg imm_odd:$lane))),
+                       (v2f32 (COPY_TO_REGCLASS (VT4 DPR:$src), DPR_VFP2)),
+                       (SSubReg_f16_reg imm_odd:$lane))),
               HPR)>;
+  def : Pat<(extractelt (VT8 QPR:$src), imm_odd:$lane),
+            (COPY_TO_REGCLASS
+              (VMOVH (EXTRACT_SUBREG
+                       (v4f32 (COPY_TO_REGCLASS (VT8 QPR:$src), QPR_VFP2)),
+                       (SSubReg_f16_reg imm_odd:$lane))),
+              HPR)>;
+}
 
-def : Pat<(extractelt (v8f16 QPR:$src), imm_even:$lane),
-          (EXTRACT_SUBREG
-            (v4f32 (COPY_TO_REGCLASS (v8f16 QPR:$src), QPR_VFP2)),
-            (SSubReg_f16_reg imm_even:$lane))>;
+let Predicates = [HasNEON] in {
+  defm : ExtractEltEvenF16<v4f16, v8f16>;
+  defm : ExtractEltOddF16VMOVH<v4f16, v8f16>;
+}
+
+let AddedComplexity = 1, Predicates = [HasNEON, HasBF16, HasFullFP16] in {
+  // If VMOVH (vmovx.f16) is available, use it to extract BF16 from the odd
+  // lanes.
+  defm : ExtractEltOddF16VMOVH<v4bf16, v8bf16>;
+}
 
-def : Pat<(extractelt (v8f16 QPR:$src), imm_odd:$lane),
+let Predicates = [HasBF16, HasNEON] in {
+  defm : ExtractEltEvenF16<v4bf16, v8bf16>;
+
+  // Otherwise, if VMOVH is not available, resort to extracting the odd lane
+  // into a GPR and then moving to HPR.
+  def : Pat<(extractelt (v4bf16 DPR:$src), imm_odd:$lane),
             (COPY_TO_REGCLASS
-              (VMOVH (EXTRACT_SUBREG
-                (v4f32 (COPY_TO_REGCLASS (v8f16 QPR:$src), QPR_VFP2)),
-                (SSubReg_f16_reg imm_odd:$lane))),
+              (VGETLNu16 (v4bf16 DPR:$src), imm:$lane),
+              HPR)>;
+
+  def : Pat<(extractelt (v8bf16 QPR:$src), imm_odd:$lane),
+            (COPY_TO_REGCLASS
+              (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
+                                  (DSubReg_i16_reg imm:$lane))),
+                         (SubReg_i16_lane imm:$lane)),
               HPR)>;
 }
 
@@ -6465,6 +6493,21 @@
   }
 }
 
+// TODO: for odd lanes we could optimize this a bit by using the VINS
+// FullFP16 instruction when it is available.
+multiclass InsertEltF16<ValueType VTScalar, ValueType VT4, ValueType VT8> {
+  def : Pat<(insertelt (VT4 DPR:$src1), (VTScalar HPR:$src2), imm:$lane),
+            (VT4 (VSETLNi16 DPR:$src1,
+                            (COPY_TO_REGCLASS HPR:$src2, GPR), imm:$lane))>;
+  def : Pat<(insertelt (VT8 QPR:$src1), (VTScalar HPR:$src2), imm:$lane),
+            (VT8 (INSERT_SUBREG QPR:$src1,
+                   (v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
+                                              (DSubReg_i16_reg imm:$lane))),
+                                     (COPY_TO_REGCLASS HPR:$src2, GPR),
+                                     (SubReg_i16_lane imm:$lane))),
+                   (DSubReg_i16_reg imm:$lane)))>;
+}
+
 let Predicates = [HasNEON] in {
 def : Pat<(vector_insert (v16i8 QPR:$src1),
                          GPR:$src2, imm:$lane),
           (v16i8 (INSERT_SUBREG QPR:$src1,
@@ -6492,14 +6535,7 @@
           (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2)),
                          SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
 
-def : Pat<(insertelt (v4f16 DPR:$src1), (f16 HPR:$src2), imm:$lane),
-          (v4f16 (VSETLNi16 DPR:$src1, (VMOVRH $src2), imm:$lane))>;
-def : Pat<(insertelt (v8f16 QPR:$src1), (f16 HPR:$src2), imm:$lane),
-          (v8f16 (INSERT_SUBREG QPR:$src1,
-                   (v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
-                                              (DSubReg_i16_reg imm:$lane))),
-                                     (VMOVRH $src2), (SubReg_i16_lane imm:$lane))),
-                   (DSubReg_i16_reg imm:$lane)))>;
+defm : InsertEltF16<f16, v4f16, v8f16>;
 
 //def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
 //          (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
@@ -6534,6 +6570,9 @@
                              dsub_0)>;
 }
 
+let Predicates = [HasNEON, HasBF16] in
+defm : InsertEltF16<bf16, v4bf16, v8bf16>;
+
 // VDUP : Vector Duplicate (from ARM core register to all elements)
 
 class VDUPD<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
@@ -6652,6 +6691,23 @@
                               (f16 HPR:$src), ssub_0), (i32 0)))>;
 }
 
+let Predicates = [HasNEON, HasBF16] in {
+def : Pat<(v4bf16 (ARMvduplane (v4bf16 DPR:$Vm), imm:$lane)),
+          (VDUPLN16d DPR:$Vm, imm:$lane)>;
+
+def : Pat<(v8bf16 (ARMvduplane (v8bf16 QPR:$src), imm:$lane)),
+          (v8bf16 (VDUPLN16q (v4bf16 (EXTRACT_SUBREG QPR:$src,
+                                       (DSubReg_i16_reg imm:$lane))),
+                             (SubReg_i16_lane imm:$lane)))>;
+
+def : Pat<(v4bf16 (ARMvdup (bf16 HPR:$src))),
+          (v4bf16 (VDUPLN16d (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)),
+                               (bf16 HPR:$src), ssub_0), (i32 0)))>;
+def : Pat<(v8bf16 (ARMvdup (bf16 HPR:$src))),
+          (v8bf16 (VDUPLN16q (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)),
+                               (bf16 HPR:$src), ssub_0), (i32 0)))>;
+}
+
 // VMOVN : Vector Narrowing Move
 defm VMOVN : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVN,
                       "vmovn", "i", trunc>;
@@ -7979,6 +8035,8 @@
           (REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
 def : Pat<(v8f16 (concat_vectors DPR:$Dn, DPR:$Dm)),
           (REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
+def : Pat<(v8bf16 (concat_vectors DPR:$Dn, DPR:$Dm)),
+          (REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
 }
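Note on the two odd-lane extract strategies above: the VGETLNu16 fallback (HasBF16 without HasFullFP16) is not exercised by the tests below, because both RUN lines enable +fullfp16 — per the FIXME, bfloat argument and return lowering still depends on it. Once that dependency is removed, a test along the following lines would pin the fallback down. This is a hypothetical sketch, not part of the patch: the function name and CHECK lines are mine, inferred from the VGETLNu16 + COPY_TO_REGCLASS pattern rather than generated by update_llc_test_checks.py.

; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon < %s | FileCheck %s
define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_odd_nofp16(<4 x bfloat> %v) {
; Expected selection (inferred): vmov.u16 r0, d0[1] (VGETLNu16),
; then vmov s0, r0 to move the lane from the GPR back into s0.
entry:
  %0 = extractelement <4 x bfloat> %v, i32 1
  ret bfloat %0
}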
diff --git a/llvm/test/CodeGen/ARM/bf16-create-get-set-dup.ll b/llvm/test/CodeGen/ARM/bf16-create-get-set-dup.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/bf16-create-get-set-dup.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon,+fullfp16 < %s | FileCheck %s
+; FIXME: Remove +fullfp16 once the lowering of bfloat arguments and returns
+; stops depending on it.
+
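+; Lane-to-register mapping used by the checks below: 16-bit lane i of a D/Q
+; register lives in S register i/2 (SSubReg_f16_reg in ARMInstrNEON.td).
+; Even lanes are the bottom half of that S register, so a plain vmov.f32 of
+; it suffices; odd lanes are the top half and need vmovx.f16 (+fullfp16).
+; For example, lane 6 of q0 is the bottom half of s3 and lane 7 its top half,
+; as the test_vgetq_lane_bf16_* checks below show.
+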
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8.6a-arm-none-eabi"
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vcreate_bf16(i64 %a) {
+; CHECK-LABEL: test_vcreate_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = bitcast i64 %a to <4 x bfloat>
+  ret <4 x bfloat> %0
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_n_bf16(bfloat %v) {
+; CHECK-LABEL: test_vdup_n_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $s0 killed $s0 def $d0
+; CHECK-NEXT:    vdup.16 d0, d0[0]
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit.i = insertelement <4 x bfloat> undef, bfloat %v, i32 0
+  %vecinit3.i = shufflevector <4 x bfloat> %vecinit.i, <4 x bfloat> undef, <4 x i32> zeroinitializer
+  ret <4 x bfloat> %vecinit3.i
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_n_bf16(bfloat %v) {
+; CHECK-LABEL: test_vdupq_n_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $s0 killed $s0 def $d0
+; CHECK-NEXT:    vdup.16 q0, d0[0]
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit.i = insertelement <8 x bfloat> undef, bfloat %v, i32 0
+  %vecinit7.i = shufflevector <8 x bfloat> %vecinit.i, <8 x bfloat> undef, <8 x i32> zeroinitializer
+  ret <8 x bfloat> %vecinit7.i
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_lane_bf16(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vdup_lane_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 d0, d0[1]
+; CHECK-NEXT:    bx lr
+entry:
+  %lane = shufflevector <4 x bfloat> %v, <4 x bfloat> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x bfloat> %lane
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_lane_bf16(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vdupq_lane_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vdup.16 q0, d0[1]
+; CHECK-NEXT:    bx lr
+entry:
+  %lane = shufflevector <4 x bfloat> %v, <4 x bfloat> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  ret <8 x bfloat> %lane
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_laneq_bf16(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vdup_laneq_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 d0, d1[3]
+; CHECK-NEXT:    bx lr
+entry:
+  %lane = shufflevector <8 x bfloat> %v, <8 x bfloat> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  ret <4 x bfloat> %lane
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_laneq_bf16(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vdupq_laneq_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 q0, d1[3]
+; CHECK-NEXT:    bx lr
+entry:
+  %lane = shufflevector <8 x bfloat> %v, <8 x bfloat> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  ret <8 x bfloat> %lane
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vcombine_bf16(<4 x bfloat> %low, <4 x bfloat> %high) {
+; CHECK-LABEL: test_vcombine_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f64 d16, d1
+; CHECK-NEXT:    vorr d17, d0, d0
+; CHECK-NEXT:    vorr q0, q8, q8
+; CHECK-NEXT:    bx lr
+entry:
+  %shuffle.i = shufflevector <4 x bfloat> %high, <4 x bfloat> %low, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x bfloat> %shuffle.i
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vget_high_bf16(<8 x bfloat> %a) {
+; CHECK-LABEL: test_vget_high_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f64 d0, d1
+; CHECK-NEXT:    bx lr
+entry:
+  %shuffle.i = shufflevector <8 x bfloat> %a, <8 x bfloat> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  ret <4 x bfloat> %shuffle.i
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vget_low_bf16(<8 x bfloat> %a) {
+; CHECK-LABEL: test_vget_low_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    bx lr
+entry:
+  %shuffle.i = shufflevector <8 x bfloat> %a, <8 x bfloat> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x bfloat> %shuffle.i
+}
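+
+; For lane operations that stay in the NEON domain, 16-bit lane i of a Q
+; register maps to D register i/4, sub-lane i%4 (DSubReg_i16_reg and
+; SubReg_i16_lane in the .td patterns), which is why lane 7 appears as d1[3]
+; in the vdup_laneq tests above.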
+
+define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_even(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vgetq_lane_bf16_even:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f32 s0, s3
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <8 x bfloat> %v, i32 6
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_odd(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vgetq_lane_bf16_odd:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <8 x bfloat> %v, i32 7
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_even(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vget_lane_bf16_even:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f32 s0, s1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x bfloat> %v, i32 2
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_odd(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vget_lane_bf16_odd:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x bfloat> %v, i32 1
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vset_lane_bf16(bfloat %a, <4 x bfloat> %v) {
+; CHECK-LABEL: test_vset_lane_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov.16 d1[1], r0
+; CHECK-NEXT:    vorr d0, d1, d1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <4 x bfloat> %v, bfloat %a, i32 1
+  ret <4 x bfloat> %0
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vsetq_lane_bf16(bfloat %a, <8 x bfloat> %v) {
+; CHECK-LABEL: test_vsetq_lane_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov.16 d3[3], r0
+; CHECK-NEXT:    vorr q0, q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <8 x bfloat> %v, bfloat %a, i32 7
+  ret <8 x bfloat> %0
+}
diff --git a/llvm/test/CodeGen/ARM/bf16-getlane-with-fp16.ll b/llvm/test/CodeGen/ARM/bf16-getlane-with-fp16.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/bf16-getlane-with-fp16.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon,+fullfp16 < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8.6a-arm-none-eabi"
+
+define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_even(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vgetq_lane_bf16_even:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f32 s0, s3
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <8 x bfloat> %v, i32 6
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_odd(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vgetq_lane_bf16_odd:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <8 x bfloat> %v, i32 7
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_even(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vget_lane_bf16_even:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f32 s0, s1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x bfloat> %v, i32 2
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_odd(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vget_lane_bf16_odd:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x bfloat> %v, i32 1
+  ret bfloat %0
+}
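The fp16-insert-extract.ll updates below are fallout from replacing VSETLNi16's (VMOVRH $src2) operand with (COPY_TO_REGCLASS HPR:$src2, GPR): the scalar now reaches the GPR through a plain 32-bit vmov (VMOVRS) instead of the FullFP16-only vmov.f16 (VMOVRH). Since vmov.16 d0[x], r0 reads only the bottom 16 bits of r0, the two moves are interchangeable here, and dropping VMOVRH is what lets the same InsertEltF16 multiclass serve bf16, for which no VMOVRH form exists. A minimal sketch of the shape of code affected (hypothetical function, not taken from the test file; the register assignments assume the AAPCS-VFP calling convention):

define arm_aapcs_vfpcc <4 x half> @set_lane_3(<4 x half> %v, half %s) {
; before this patch: vmov.f16 r0, s2   (VMOVRH, requires +fullfp16)
; after this patch:  vmov r0, s2       (VMOVRS, plain 32-bit move)
; then, either way:  vmov.16 d0[3], r0 (reads only the low 16 bits of r0)
entry:
  %r = insertelement <4 x half> %v, half %s, i32 3
  ret <4 x half> %r
}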
diff --git a/llvm/test/CodeGen/ARM/fp16-insert-extract.ll b/llvm/test/CodeGen/ARM/fp16-insert-extract.ll
--- a/llvm/test/CodeGen/ARM/fp16-insert-extract.ll
+++ b/llvm/test/CodeGen/ARM/fp16-insert-extract.ll
@@ -82,7 +82,7 @@
 ; CHECKHARD-LABEL: test_vset_lane_f16:
 ; CHECKHARD:       @ %bb.0: @ %entry
 ; CHECKHARD-NEXT:    vcvtb.f16.f32 s2, s2
-; CHECKHARD-NEXT:    vmov.f16 r0, s2
+; CHECKHARD-NEXT:    vmov r0, s2
 ; CHECKHARD-NEXT:    vmov.16 d0[3], r0
 ; CHECKHARD-NEXT:    bx lr
 ;
@@ -91,7 +91,7 @@
 ; CHECKSOFT-NEXT:    vmov s0, r2
 ; CHECKSOFT-NEXT:    vcvtb.f16.f32 s0, s0
 ; CHECKSOFT-NEXT:    vmov d16, r0, r1
-; CHECKSOFT-NEXT:    vmov.f16 r2, s0
+; CHECKSOFT-NEXT:    vmov r2, s0
 ; CHECKSOFT-NEXT:    vmov.16 d16[3], r2
 ; CHECKSOFT-NEXT:    vmov r0, r1, d16
 ; CHECKSOFT-NEXT:    bx lr
@@ -105,7 +105,7 @@
 ; CHECKHARD-LABEL: test_vset_laneq_f16_1:
 ; CHECKHARD:       @ %bb.0: @ %entry
 ; CHECKHARD-NEXT:    vcvtb.f16.f32 s4, s4
-; CHECKHARD-NEXT:    vmov.f16 r0, s4
+; CHECKHARD-NEXT:    vmov r0, s4
 ; CHECKHARD-NEXT:    vmov.16 d0[1], r0
 ; CHECKHARD-NEXT:    bx lr
 ;
@@ -115,7 +115,7 @@
 ; CHECKSOFT-NEXT:    vmov d17, r2, r3
 ; CHECKSOFT-NEXT:    vmov d16, r0, r1
 ; CHECKSOFT-NEXT:    vcvtb.f16.f32 s0, s0
-; CHECKSOFT-NEXT:    vmov.f16 r12, s0
+; CHECKSOFT-NEXT:    vmov r12, s0
 ; CHECKSOFT-NEXT:    vmov.16 d16[1], r12
 ; CHECKSOFT-NEXT:    vmov r2, r3, d17
 ; CHECKSOFT-NEXT:    vmov r0, r1, d16
@@ -130,7 +130,7 @@
 ; CHECKHARD-LABEL: test_vset_laneq_f16_7:
 ; CHECKHARD:       @ %bb.0: @ %entry
 ; CHECKHARD-NEXT:    vcvtb.f16.f32 s4, s4
-; CHECKHARD-NEXT:    vmov.f16 r0, s4
+; CHECKHARD-NEXT:    vmov r0, s4
 ; CHECKHARD-NEXT:    vmov.16 d1[3], r0
 ; CHECKHARD-NEXT:    bx lr
 ;
@@ -140,7 +140,7 @@
 ; CHECKSOFT-NEXT:    vmov d17, r2, r3
 ; CHECKSOFT-NEXT:    vmov d16, r0, r1
 ; CHECKSOFT-NEXT:    vcvtb.f16.f32 s0, s0
-; CHECKSOFT-NEXT:    vmov.f16 r12, s0
+; CHECKSOFT-NEXT:    vmov r12, s0
 ; CHECKSOFT-NEXT:    vmov.16 d17[3], r12
 ; CHECKSOFT-NEXT:    vmov r0, r1, d16
 ; CHECKSOFT-NEXT:    vmov r2, r3, d17