Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4924,7 +4924,8 @@
 bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
   // We can materialize #0.0 as fmov $Rd, XZR for 64-bit and 32-bit cases.
   // FIXME: We should be able to handle f128 as well with a clever lowering.
-  if (Imm.isPosZero() && (VT == MVT::f16 || VT == MVT::f64 || VT == MVT::f32)) {
+  if (Imm.isPosZero() && (VT == MVT::f64 || VT == MVT::f32 ||
+                          (VT == MVT::f16 && Subtarget->hasFullFP16()))) {
     DEBUG(dbgs() << "Legal fp imm: materialize 0 using the zero register\n");
     return true;
   }
Index: lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.td
+++ lib/Target/AArch64/AArch64InstrInfo.td
@@ -2717,7 +2717,7 @@
 // Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
 let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
 def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
-             Sched<[WriteF]>;
+             Sched<[WriteF]>, Requires<[HasFullFP16]>;
 def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
              Sched<[WriteF]>;
 def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
Index: test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
===================================================================
--- test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
+++ test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
@@ -16,11 +16,11 @@
 entry:
 ; ALL-LABEL: t1:
 ; ALL-NOT: fmov
-; CYCLONE: fmov h0, wzr
+; CYCLONE: ldr h0,{{.*}}
 ; CYCLONE: fmov s1, wzr
 ; CYCLONE: fmov d2, xzr
 ; CYCLONE: movi.16b v3, #0
-; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
+; OTHERS: ldr h0,{{.*}}
 ; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
 ; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
 ; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
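
For context, here is a minimal standalone reproducer sketch (not part of the patch; the file/function name, triples, and RUN lines are illustrative). With +fullfp16, isFPImmLegal still reports f16 +0.0 as legal and the FMOVH0 pseudo expands to "fmov h0, wzr"; without it, the expectation mirrors the updated checks above, i.e. the zero is loaded from the constant pool instead:

; Hypothetical reproducer, zero-half.ll (names and triples are assumptions).
; RUN: llc < %s -mtriple=aarch64-none-eabi -mattr=+fullfp16 | FileCheck %s -check-prefix=FP16
; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s -check-prefix=NOFP16
define half @zero_h() {
entry:
; FP16:   fmov h0, wzr
; NOFP16: ldr h0,{{.*}}
  ret half 0.0
}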