Index: llvm/lib/Target/ARM/ARMInstrNEON.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrNEON.td
+++ llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -2481,7 +2481,7 @@
                                    MVT::i32);
 }]>;
 
-// Extract S sub-registers of Q/D registers containing a given f16 lane.
+// Extract S sub-registers of Q/D registers containing a given f16/bf16 lane.
 def SSubReg_f16_reg : SDNodeXForm<imm, [{
   assert(ARM::ssub_3 == ARM::ssub_0+3 && "Unexpected subreg numbering");
   return CurDAG->getTargetConstant(ARM::ssub_0 + N->getZExtValue()/2, SDLoc(N),
@@ -6407,30 +6407,56 @@
 def imm_even : ImmLeaf<i32, [{ return (Imm & 1) == 0; }]>;
 def imm_odd : ImmLeaf<i32, [{ return (Imm & 1) == 1; }]>;
 
+multiclass ExtractEltEvenF16<ValueType VT4, ValueType VT8> {
+  def : Pat<(extractelt (VT4 DPR:$src), imm_even:$lane),
+            (EXTRACT_SUBREG
+                (v2f32 (COPY_TO_REGCLASS (VT4 DPR:$src), DPR_VFP2)),
+                (SSubReg_f16_reg imm_even:$lane))>;
+  def : Pat<(extractelt (VT8 QPR:$src), imm_even:$lane),
+            (EXTRACT_SUBREG
+                (v4f32 (COPY_TO_REGCLASS (VT8 QPR:$src), QPR_VFP2)),
+                (SSubReg_f16_reg imm_even:$lane))>;
+}
+
+multiclass ExtractEltOddF16VMOVH<ValueType VT4, ValueType VT8> {
+  def : Pat<(extractelt (VT4 DPR:$src), imm_odd:$lane),
+            (COPY_TO_REGCLASS
+              (VMOVH (EXTRACT_SUBREG
+                        (v2f32 (COPY_TO_REGCLASS (VT4 DPR:$src), DPR_VFP2)),
+                        (SSubReg_f16_reg imm_odd:$lane))),
+              HPR)>;
+
+
+  def : Pat<(extractelt (VT8 QPR:$src), imm_odd:$lane),
+            (COPY_TO_REGCLASS
+              (VMOVH (EXTRACT_SUBREG
+                        (v4f32 (COPY_TO_REGCLASS (VT8 QPR:$src), QPR_VFP2)),
+                        (SSubReg_f16_reg imm_odd:$lane))),
+              HPR)>;
+}
+
 let Predicates = [HasNEON] in {
-def : Pat<(extractelt (v4f16 DPR:$src), imm_even:$lane),
-            (EXTRACT_SUBREG
-                (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
-                (SSubReg_f16_reg imm_even:$lane))>;
-
-def : Pat<(extractelt (v4f16 DPR:$src), imm_odd:$lane),
-            (COPY_TO_REGCLASS
-              (VMOVH (EXTRACT_SUBREG
-                  (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
-                  (SSubReg_f16_reg imm_odd:$lane))),
-              HPR)>;
-
-def : Pat<(extractelt (v8f16 QPR:$src), imm_even:$lane),
-            (EXTRACT_SUBREG
-                (v4f32 (COPY_TO_REGCLASS (v8f16 QPR:$src), QPR_VFP2)),
-                (SSubReg_f16_reg imm_even:$lane))>;
-
-def : Pat<(extractelt (v8f16 QPR:$src), imm_odd:$lane),
-            (COPY_TO_REGCLASS
-              (VMOVH (EXTRACT_SUBREG
-                  (v4f32 (COPY_TO_REGCLASS (v8f16 QPR:$src), QPR_VFP2)),
-                  (SSubReg_f16_reg imm_odd:$lane))),
-              HPR)>;
+  defm : ExtractEltEvenF16<v4f16, v8f16>;
+  defm : ExtractEltOddF16VMOVH<v4f16, v8f16>;
+}
+
+let AddedComplexity = 1, Predicates = [HasNEON, HasBF16, HasFullFP16] in {
+  // If VMOVH (vmovx.f16) is available, use it to extract BF16 from the odd lanes.
+  defm : ExtractEltOddF16VMOVH<v4bf16, v8bf16>;
+}
+
+let Predicates = [HasBF16, HasNEON] in {
+  defm : ExtractEltEvenF16<v4bf16, v8bf16>;
+
+  // Otherwise, if VMOVH is not available, resort to extracting the odd lane
+  // into a GPR and then moving it to HPR.
+  def : Pat<(extractelt (v4bf16 DPR:$src), imm_odd:$lane),
+            (VMOVHR (VGETLNu16 (v4bf16 DPR:$src), imm:$lane))>;
+
+  def : Pat<(extractelt (v8bf16 QPR:$src), imm_odd:$lane),
+            (VMOVHR (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
+                                         (DSubReg_i16_reg imm:$lane))),
+                               (SubReg_i16_lane imm:$lane)))>;
 }
 
 // VMOV : Vector Set Lane (move ARM core register to scalar)
@@ -6465,6 +6491,17 @@
 }
 }
 
+multiclass InsertEltF16<ValueType VTScalar, ValueType VT4, ValueType VT8> {
+  def : Pat<(insertelt (VT4 DPR:$src1), (VTScalar HPR:$src2), imm:$lane),
+            (VT4 (VSETLNi16 DPR:$src1, (VMOVRH $src2), imm:$lane))>;
+  def : Pat<(insertelt (VT8 QPR:$src1), (VTScalar HPR:$src2), imm:$lane),
+            (VT8 (INSERT_SUBREG QPR:$src1,
+                   (v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
+                                       (DSubReg_i16_reg imm:$lane))),
+                     (VMOVRH $src2), (SubReg_i16_lane imm:$lane))),
+                   (DSubReg_i16_reg imm:$lane)))>;
+}
+
 let Predicates = [HasNEON] in {
 def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
           (v16i8 (INSERT_SUBREG QPR:$src1,
@@ -6492,14 +6529,7 @@
           (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2)),
                          SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
 
-def : Pat<(insertelt (v4f16 DPR:$src1), (f16 HPR:$src2), imm:$lane),
-          (v4f16 (VSETLNi16 DPR:$src1, (VMOVRH $src2), imm:$lane))>;
-def : Pat<(insertelt (v8f16 QPR:$src1), (f16 HPR:$src2), imm:$lane),
-          (v8f16 (INSERT_SUBREG QPR:$src1,
-                   (v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
-                                      (DSubReg_i16_reg imm:$lane))),
-                     (VMOVRH $src2), (SubReg_i16_lane imm:$lane))),
-                   (DSubReg_i16_reg imm:$lane)))>;
+defm : InsertEltF16<f16, v4f16, v8f16>;
 
 //def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
 //          (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
@@ -6534,6 +6564,9 @@
           dsub_0)>;
 }
 
+let Predicates = [HasNEON, HasBF16] in
+defm : InsertEltF16<bf16, v4bf16, v8bf16>;
+
 // VDUP : Vector Duplicate (from ARM core register to all elements)
 
 class VDUPD<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
@@ -6652,6 +6685,23 @@
                               (f16 HPR:$src), ssub_0), (i32 0)))>;
 }
 
+let Predicates = [HasNEON, HasBF16] in {
+def : Pat<(v4bf16 (ARMvduplane (v4bf16 DPR:$Vm), imm:$lane)),
+          (VDUPLN16d DPR:$Vm, imm:$lane)>;
+
+def : Pat<(v8bf16 (ARMvduplane (v8bf16 QPR:$src), imm:$lane)),
+          (v8bf16 (VDUPLN16q (v4bf16 (EXTRACT_SUBREG QPR:$src,
+                                        (DSubReg_i16_reg imm:$lane))),
+                             (SubReg_i16_lane imm:$lane)))>;
+
+def : Pat<(v4bf16 (ARMvdup (bf16 HPR:$src))),
+          (v4bf16 (VDUPLN16d (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)),
+                               (bf16 HPR:$src), ssub_0), (i32 0)))>;
+def : Pat<(v8bf16 (ARMvdup (bf16 HPR:$src))),
+          (v8bf16 (VDUPLN16q (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)),
+                               (bf16 HPR:$src), ssub_0), (i32 0)))>;
+}
+
 // VMOVN : Vector Narrowing Move
 defm VMOVN   : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVN,
                         "vmovn", "i", trunc>;
@@ -7979,6 +8029,8 @@
           (REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
 def : Pat<(v8f16 (concat_vectors DPR:$Dn, DPR:$Dm)),
           (REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
+def : Pat<(v8bf16 (concat_vectors DPR:$Dn, DPR:$Dm)),
+          (REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
 }
 
 //===----------------------------------------------------------------------===//
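Note on the extract patterns above: an even f16/bf16 lane occupies the low half of its containing S register, so SSubReg_f16_reg (ssub_0 + lane/2) reduces an even-lane extract to a plain sub-register copy. An odd lane sits in the high half, which takes either vmovx.f16 (the VMOVH patterns, guarded by HasFullFP16) or, for bf16 targets without full FP16, a round-trip through a GPR (VGETLNu16 followed by VMOVHR). AddedComplexity = 1 makes the vmovx.f16 route win when both predicate sets apply. A minimal IR sketch of the two cases (illustrative only, not part of the patch; function names are made up):

; Illustrative sketch -- mirrors the patterns above; not part of the patch.
define arm_aapcs_vfpcc bfloat @sketch_even_lane(<4 x bfloat> %v) {
entry:
  ; Lane 2 is the low half of s1, so ExtractEltEvenF16 selects no
  ; instruction at all: the result is simply the s1 sub-register of d0.
  %e = extractelement <4 x bfloat> %v, i32 2
  ret bfloat %e
}

define arm_aapcs_vfpcc bfloat @sketch_odd_lane(<4 x bfloat> %v) {
entry:
  ; Lane 1 is the high half of s0. With +fullfp16 this selects
  ; "vmovx.f16 s0, s0" (ExtractEltOddF16VMOVH); without it, the bf16-only
  ; patterns emit "vmov.u16 r1, d0[1]" plus "vmov.f16 s0, r1".
  %e = extractelement <4 x bfloat> %v, i32 1
  ret bfloat %e
}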
Index: llvm/test/CodeGen/ARM/bf16-create-get-set-dup.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/ARM/bf16-create-get-set-dup.ll
@@ -0,0 +1,179 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8.6a-arm-none-eabi"
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vcreate_bf16(i64 %a) {
+; CHECK-LABEL: test_vcreate_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = bitcast i64 %a to <4 x bfloat>
+  ret <4 x bfloat> %0
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_n_bf16(bfloat %v) {
+; CHECK-LABEL: test_vdup_n_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $s0 killed $s0 def $d0
+; CHECK-NEXT:    vdup.16 d0, d0[0]
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit.i = insertelement <4 x bfloat> undef, bfloat %v, i32 0
+  %vecinit3.i = shufflevector <4 x bfloat> %vecinit.i, <4 x bfloat> undef, <4 x i32> zeroinitializer
+  ret <4 x bfloat> %vecinit3.i
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_n_bf16(bfloat %v) {
+; CHECK-LABEL: test_vdupq_n_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $s0 killed $s0 def $d0
+; CHECK-NEXT:    vdup.16 q0, d0[0]
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit.i = insertelement <8 x bfloat> undef, bfloat %v, i32 0
+  %vecinit7.i = shufflevector <8 x bfloat> %vecinit.i, <8 x bfloat> undef, <8 x i32> zeroinitializer
+  ret <8 x bfloat> %vecinit7.i
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_lane_bf16(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vdup_lane_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 d0, d0[1]
+; CHECK-NEXT:    bx lr
+entry:
+  %lane = shufflevector <4 x bfloat> %v, <4 x bfloat> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x bfloat> %lane
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_lane_bf16(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vdupq_lane_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vdup.16 q0, d0[1]
+; CHECK-NEXT:    bx lr
+entry:
+  %lane = shufflevector <4 x bfloat> %v, <4 x bfloat> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  ret <8 x bfloat> %lane
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_laneq_bf16(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vdup_laneq_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 d0, d1[3]
+; CHECK-NEXT:    bx lr
+entry:
+  %lane = shufflevector <8 x bfloat> %v, <8 x bfloat> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  ret <4 x bfloat> %lane
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_laneq_bf16(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vdupq_laneq_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 q0, d1[3]
+; CHECK-NEXT:    bx lr
+entry:
+  %lane = shufflevector <8 x bfloat> %v, <8 x bfloat> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  ret <8 x bfloat> %lane
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vcombine_bf16(<4 x bfloat> %low, <4 x bfloat> %high) {
+; CHECK-LABEL: test_vcombine_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $d1 killed $d1 killed $q0 def $q0
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0 def $q0
+; CHECK-NEXT:    bx lr
+entry:
+  %shuffle.i = shufflevector <4 x bfloat> %low, <4 x bfloat> %high, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x bfloat> %shuffle.i
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vget_high_bf16(<8 x bfloat> %a) {
+; CHECK-LABEL: test_vget_high_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f64 d0, d1
+; CHECK-NEXT:    bx lr
+entry:
+  %shuffle.i = shufflevector <8 x bfloat> %a, <8 x bfloat> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  ret <4 x bfloat> %shuffle.i
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vget_low_bf16(<8 x bfloat> %a) {
+; CHECK-LABEL: test_vget_low_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    bx lr
+entry:
+  %shuffle.i = shufflevector <8 x bfloat> %a, <8 x bfloat> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x bfloat> %shuffle.i
+}
+
+define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_even(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vgetq_lane_bf16_even:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vstr.16 s3, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <8 x bfloat> %v, i32 6
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_odd(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vgetq_lane_bf16_odd:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, d1[3]
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <8 x bfloat> %v, i32 7
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_even(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vget_lane_bf16_even:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vstr.16 s1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x bfloat> %v, i32 2
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_odd(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vget_lane_bf16_odd:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, d0[1]
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x bfloat> %v, i32 1
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc <4 x bfloat> @test_vset_lane_bf16(bfloat %a, <4 x bfloat> %v) {
+; CHECK-LABEL: test_vset_lane_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 r0, s0
+; CHECK-NEXT:    vmov.16 d1[1], r0
+; CHECK-NEXT:    vorr d0, d1, d1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <4 x bfloat> %v, bfloat %a, i32 1
+  ret <4 x bfloat> %0
+}
+
+define arm_aapcs_vfpcc <8 x bfloat> @test_vsetq_lane_bf16(bfloat %a, <8 x bfloat> %v) {
+; CHECK-LABEL: test_vsetq_lane_bf16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 r0, s0
+; CHECK-NEXT:    vmov.16 d3[3], r0
+; CHECK-NEXT:    vorr q0, q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = insertelement <8 x bfloat> %v, bfloat %a, i32 7
+  ret <8 x bfloat> %0
+}
Index: llvm/test/CodeGen/ARM/bf16-getlane-with-fp16.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/ARM/bf16-getlane-with-fp16.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon,+fullfp16 < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8.6a-arm-none-eabi"
+
+define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_even(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vgetq_lane_bf16_even:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vstr.16 s3, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <8 x bfloat> %v, i32 6
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_odd(<8 x bfloat> %v) {
+; CHECK-LABEL: test_vgetq_lane_bf16_odd:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <8 x bfloat> %v, i32 7
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_even(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vget_lane_bf16_even:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vstr.16 s1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x bfloat> %v, i32 2
+  ret bfloat %0
+}
+
+define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_odd(<4 x bfloat> %v) {
+; CHECK-LABEL: test_vget_lane_bf16_odd:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x bfloat> %v, i32 1
+  ret bfloat %0
+}
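To compare the two odd-lane strategies directly, the RUN configurations of the two test files can be combined into a single file with distinct FileCheck prefixes. A hedged sketch (flags copied from the RUN lines above; the prefixes and function name are illustrative):

; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon < %s \
; RUN:   | FileCheck %s --check-prefix=NOFP16
; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon,+fullfp16 < %s \
; RUN:   | FileCheck %s --check-prefix=FP16
define arm_aapcs_vfpcc bfloat @odd_lane(<4 x bfloat> %v) {
; Without +fullfp16, the odd lane round-trips through a GPR;
; with +fullfp16, a single vmovx.f16 suffices.
; NOFP16:      vmov.u16 r1, d0[1]
; NOFP16-NEXT: vmov.f16 s0, r1
; FP16:        vmovx.f16 s0, s0
entry:
  %0 = extractelement <4 x bfloat> %v, i32 1
  ret bfloat %0
}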