Index: lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.td
+++ lib/Target/AArch64/AArch64InstrInfo.td
@@ -5327,6 +5327,8 @@
                   (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
 def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
           (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
+def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
+          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
 def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
                 (and FPR32:$Rn, (i32 65535)),
                 vecshiftR16:$imm)),
Index: test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
===================================================================
--- test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
+++ test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
@@ -342,3 +342,13 @@
   %0 = trunc i32 %facg to i16
   ret i16 %0
 }
+
+define dso_local half @vcvth_n_f16_s64_test(i64 %a) {
+; CHECK-LABEL: vcvth_n_f16_s64_test:
+; CHECK: fmov d0, x0
+; CHECK-NEXT: scvtf h0, h0, #16
+; CHECK-NEXT: ret
+entry:
+  %vcvth_n_f16_s64 = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i64(i64 %a, i32 16)
+  ret half %vcvth_n_f16_s64
+}