Index: llvm/lib/Target/ARM/ARMInstrVFP.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrVFP.td
+++ llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -1005,6 +1005,9 @@
     }
   }
 
+  def : InstAlias<!strconcat("vrint", opc, ".f16.f16\t$Sd, $Sm"),
+                  (!cast<Instruction>(NAME#"H") HPR:$Sd, HPR:$Sm), 0>,
+        Requires<[HasFullFP16]>;
   def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
                   (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
         Requires<[HasFPARMv8]>;
Index: llvm/test/MC/ARM/fullfp16-neg.s
===================================================================
--- llvm/test/MC/ARM/fullfp16-neg.s
+++ llvm/test/MC/ARM/fullfp16-neg.s
@@ -123,24 +123,38 @@
 @ CHECK: instruction requires: full half-float
 
   vrintz.f16 s3, s24
+  vrintz.f16.f16 s3, s24
+@ CHECK: instruction requires: full half-float
 @ CHECK: instruction requires: full half-float
 
   vrintr.f16 s0, s9
+  vrintr.f16.f16 s0, s9
+@ CHECK: instruction requires: full half-float
 @ CHECK: instruction requires: full half-float
 
   vrintx.f16 s10, s14
+  vrintx.f16.f16 s10, s14
+@ CHECK: instruction requires: full half-float
 @ CHECK: instruction requires: full half-float
 
   vrinta.f16 s12, s1
+  vrinta.f16.f16 s12, s1
+@ CHECK: instruction requires: full half-float
 @ CHECK: instruction requires: full half-float
 
   vrintn.f16 s12, s1
+  vrintn.f16.f16 s12, s1
+@ CHECK: instruction requires: full half-float
 @ CHECK: instruction requires: full half-float
 
   vrintp.f16 s12, s1
+  vrintp.f16.f16 s12, s1
+@ CHECK: instruction requires: full half-float
 @ CHECK: instruction requires: full half-float
 
   vrintm.f16 s12, s1
+  vrintm.f16.f16 s12, s1
+@ CHECK: instruction requires: full half-float
 @ CHECK: instruction requires: full half-float
 
   vfma.f16 s2, s7, s4
Index: llvm/test/MC/ARM/fullfp16.s
===================================================================
--- llvm/test/MC/ARM/fullfp16.s
+++ llvm/test/MC/ARM/fullfp16.s
@@ -169,32 +169,53 @@
 @ THUMB: vminnm.f16 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x09]
 
   vrintz.f16 s3, s24
+  vrintz.f16.f16 s3, s24
 @ ARM: vrintz.f16 s3, s24 @ encoding: [0xcc,0x19,0xf6,0xee]
+@ ARM: vrintz.f16 s3, s24 @ encoding: [0xcc,0x19,0xf6,0xee]
+@ THUMB: vrintz.f16 s3, s24 @ encoding: [0xf6,0xee,0xcc,0x19]
 @ THUMB: vrintz.f16 s3, s24 @ encoding: [0xf6,0xee,0xcc,0x19]
 
   vrintr.f16 s0, s9
+  vrintr.f16.f16 s0, s9
 @ ARM: vrintr.f16 s0, s9 @ encoding: [0x64,0x09,0xb6,0xee]
+@ ARM: vrintr.f16 s0, s9 @ encoding: [0x64,0x09,0xb6,0xee]
+@ THUMB: vrintr.f16 s0, s9 @ encoding: [0xb6,0xee,0x64,0x09]
 @ THUMB: vrintr.f16 s0, s9 @ encoding: [0xb6,0xee,0x64,0x09]
 
   vrintx.f16 s10, s14
+  vrintx.f16.f16 s10, s14
 @ ARM: vrintx.f16 s10, s14 @ encoding: [0x47,0x59,0xb7,0xee]
+@ ARM: vrintx.f16 s10, s14 @ encoding: [0x47,0x59,0xb7,0xee]
+@ THUMB: vrintx.f16 s10, s14 @ encoding: [0xb7,0xee,0x47,0x59]
 @ THUMB: vrintx.f16 s10, s14 @ encoding: [0xb7,0xee,0x47,0x59]
 
   vrinta.f16 s12, s1
+  vrinta.f16.f16 s12, s1
+@ ARM: vrinta.f16 s12, s1 @ encoding: [0x60,0x69,0xb8,0xfe]
 @ ARM: vrinta.f16 s12, s1 @ encoding: [0x60,0x69,0xb8,0xfe]
 @ THUMB: vrinta.f16 s12, s1 @ encoding: [0xb8,0xfe,0x60,0x69]
+@ THUMB: vrinta.f16 s12, s1 @ encoding: [0xb8,0xfe,0x60,0x69]
 
   vrintn.f16 s12, s1
+  vrintn.f16.f16 s12, s1
+@ ARM: vrintn.f16 s12, s1 @ encoding: [0x60,0x69,0xb9,0xfe]
 @ ARM: vrintn.f16 s12, s1 @ encoding: [0x60,0x69,0xb9,0xfe]
 @ THUMB: vrintn.f16 s12, s1 @ encoding: [0xb9,0xfe,0x60,0x69]
+@ THUMB: vrintn.f16 s12, s1 @ encoding: [0xb9,0xfe,0x60,0x69]
 
   vrintp.f16 s12, s1
+  vrintp.f16.f16 s12, s1
+@ ARM: vrintp.f16 s12, s1 @ encoding: [0x60,0x69,0xba,0xfe]
 @ ARM: vrintp.f16 s12, s1 @ encoding: [0x60,0x69,0xba,0xfe]
 @ THUMB: vrintp.f16 s12, s1 @ encoding: [0xba,0xfe,0x60,0x69]
+@ THUMB: vrintp.f16 s12, s1 @ encoding: [0xba,0xfe,0x60,0x69]
 
   vrintm.f16 s12, s1
+  vrintm.f16.f16 s12, s1
+@ ARM: vrintm.f16 s12, s1 @ encoding: [0x60,0x69,0xbb,0xfe]
 @ ARM: vrintm.f16 s12, s1 @ encoding: [0x60,0x69,0xbb,0xfe]
 @ THUMB: vrintm.f16 s12, s1 @ encoding: [0xbb,0xfe,0x60,0x69]
+@ THUMB: vrintm.f16 s12, s1 @ encoding: [0xbb,0xfe,0x60,0x69]
 
   vfma.f16 s2, s7, s4
 @ ARM: vfma.f16 s2, s7, s4 @ encoding: [0x82,0x19,0xa3,0xee]