diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6347,12 +6347,19 @@
             (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
               (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
             ssub))>;
-
   def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
             (i64 (EXTRACT_SUBREG
               (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
               dsub))>;
+
+  def : Pat<(v4i32 (vector_insert (v4i32 V128:$Rn),
+              (i32 (intOp (v8i16 V128:$Rm))), VectorIndexD:$imm)),
+            (INSvi32lane V128:$Rn, VectorIndexD:$imm,
+              (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
+                (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rm), ssub),
+              (i64 0))>;
+
 }
 
 defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
diff --git a/llvm/test/CodeGen/AArch64/neon-scalar-to-vector.ll b/llvm/test/CodeGen/AArch64/neon-scalar-to-vector.ll
--- a/llvm/test/CodeGen/AArch64/neon-scalar-to-vector.ll
+++ b/llvm/test/CodeGen/AArch64/neon-scalar-to-vector.ll
@@ -9,8 +9,7 @@
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
 ; CHECK-NEXT:    movi d1, #0000000000000000
 ; CHECK-NEXT:    uaddlv.8h s0, v0
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov.s v1[0], w8
+; CHECK-NEXT:    mov.s v1[0], v0[0]
 ; CHECK-NEXT:    ucvtf.2s v0, v1
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
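
For context, the new pattern lets instruction selection fold the across-lanes
add-long result straight into a vector lane, keeping the value in a SIMD
register instead of bouncing through a GPR. A minimal IR sketch that should
exercise the SADDLV instantiation shown above (the function name, lane index,
and operands are illustrative, not taken from the test file):

; With the new pattern, the insertelement of the saddlv result selects a
; single INSvi32lane ("mov.s vD[lane], vN[0]") rather than "fmov wN, sM"
; followed by "mov.s vD[lane], wN".
define <4 x i32> @saddlv_into_lane(<8 x i16> %a, <4 x i32> %acc) {
  %sum = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a)
  %r = insertelement <4 x i32> %acc, i32 %sum, i64 1
  ret <4 x i32> %r
}

declare i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16>)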