diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -5177,6 +5177,16 @@ (i64 0)), dsub)>; +def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0), + (i64 VectorIndexH:$imm)), + (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>; +def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0), + (i64 VectorIndexS:$imm)), + (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>; +def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0), + (i64 VectorIndexD:$imm)), + (INSvi64gpr V128:$Rn, VectorIndexD:$imm, XZR)>; + def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn), (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))), (INSvi16lane diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll --- a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=arm64-eabi -mcpu=generic -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -mcpu=generic -aarch64-neon-syntax=apple -mattr="+fullfp16" | FileCheck %s define void @test0f(float* nocapture %x, float %a) #0 { ; CHECK-LABEL: test0f: @@ -109,10 +109,8 @@ ; CHECK-LABEL: test_insert_v8f16_insert_1: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $h0 killed $h0 def $q0 -; CHECK-NEXT: adrp x8, .LCPI6_0 ; CHECK-NEXT: dup.8h v0, v0[0] -; CHECK-NEXT: add x8, x8, :lo12:.LCPI6_0 -; CHECK-NEXT: ld1.h { v0 }[7], [x8] +; CHECK-NEXT: mov.h v0[7], wzr ; CHECK-NEXT: ret %v.0 = insertelement <8 x half> , half %a, i32 0 %v.1 = insertelement <8 x half> %v.0, half %a, i32 1 @@ -217,9 +215,8 @@ ; CHECK-LABEL: test_insert_3_f32_undef_zero_vector: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: fmov s1, wzr ; CHECK-NEXT: dup.4s v0, v0[0] -; 
CHECK-NEXT: mov.s v0[3], v1[0] +; CHECK-NEXT: mov.s v0[3], wzr ; CHECK-NEXT: ret %v.0 = insertelement <4 x float> , float %a, i32 0 %v.1 = insertelement <4 x float> %v.0, float %a, i32 1 @@ -277,6 +274,7 @@ %v.1 = insertelement <4 x float> %v.0, float %a, i32 2 ret <4 x float> %v.1 } + define <8 x i16> @test_insert_v8i16_i16_zero(<8 x i16> %a) { ; CHECK-LABEL: test_insert_v8i16_i16_zero: ; CHECK: // %bb.0: @@ -290,10 +288,8 @@ define <4 x half> @test_insert_v4f16_f16_zero(<4 x half> %a) { ; CHECK-LABEL: test_insert_v4f16_f16_zero: ; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, .LCPI19_0 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: add x8, x8, :lo12:.LCPI19_0 -; CHECK-NEXT: ld1.h { v0 }[0], [x8] +; CHECK-NEXT: mov.h v0[0], wzr ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret %v.0 = insertelement <4 x half> %a, half 0.000000e+00, i32 0 @@ -303,9 +299,7 @@ define <8 x half> @test_insert_v8f16_f16_zero(<8 x half> %a) { ; CHECK-LABEL: test_insert_v8f16_f16_zero: ; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, .LCPI20_0 -; CHECK-NEXT: add x8, x8, :lo12:.LCPI20_0 -; CHECK-NEXT: ld1.h { v0 }[6], [x8] +; CHECK-NEXT: mov.h v0[6], wzr ; CHECK-NEXT: ret %v.0 = insertelement <8 x half> %a, half 0.000000e+00, i32 6 ret <8 x half> %v.0 @@ -315,8 +309,7 @@ ; CHECK-LABEL: test_insert_v2f32_f32_zero: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: fmov s1, wzr -; CHECK-NEXT: mov.s v0[0], v1[0] +; CHECK-NEXT: mov.s v0[0], wzr ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret %v.0 = insertelement <2 x float> %a, float 0.000000e+00, i32 0 @@ -326,8 +319,7 @@ define <4 x float> @test_insert_v4f32_f32_zero(<4 x float> %a) { ; CHECK-LABEL: test_insert_v4f32_f32_zero: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov s1, wzr -; CHECK-NEXT: mov.s v0[3], v1[0] +; CHECK-NEXT: mov.s v0[3], wzr ; CHECK-NEXT: ret %v.0 = insertelement <4 x float> %a, float 0.000000e+00, i32 3 ret <4 x float> %v.0 @@ -336,8 +328,7 @@ 
define <2 x double> @test_insert_v2f64_f64_zero(<2 x double> %a) { ; CHECK-LABEL: test_insert_v2f64_f64_zero: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov d1, xzr -; CHECK-NEXT: mov.d v0[1], v1[0] +; CHECK-NEXT: mov.d v0[1], xzr ; CHECK-NEXT: ret %v.0 = insertelement <2 x double> %a, double 0.000000e+00, i32 1 ret <2 x double> %v.0 diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization.ll --- a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization.ll @@ -47,8 +47,7 @@ define float @test_v3f32(<3 x float> %a) nounwind { ; CHECK-LABEL: test_v3f32: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov s1, wzr -; CHECK-NEXT: mov v0.s[3], v1.s[0] +; CHECK-NEXT: mov v0.s[3], wzr ; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 ; CHECK-NEXT: fadd v0.2s, v0.2s, v1.2s ; CHECK-NEXT: faddp s0, v0.2s