Index: lib/Target/ARM/ARM.td =================================================================== --- lib/Target/ARM/ARM.td +++ lib/Target/ARM/ARM.td @@ -45,7 +45,7 @@ def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true", "Enable VFP4 instructions", [FeatureVFP3, FeatureFP16]>; -def FeatureV8FP : SubtargetFeature<"v8fp", "HasV8FP", +def FeatureFPARMv8 : SubtargetFeature<"fp-armv8", "HasFPARMv8", "true", "Enable ARMv8 FP", [FeatureVFP4]>; def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true", Index: lib/Target/ARM/ARMAsmPrinter.cpp =================================================================== --- lib/Target/ARM/ARMAsmPrinter.cpp +++ lib/Target/ARM/ARMAsmPrinter.cpp @@ -808,7 +808,7 @@ if (Subtarget->hasNEON() && emitFPU) { /* NEON is not exactly a VFP architecture, but GAS emit one of * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */ - if (Subtarget->hasV8FP()) + if (Subtarget->hasFPARMv8()) AttrEmitter->EmitTextAttribute(ARMBuildAttrs::Advanced_SIMD_arch, "neon-fp-armv8"); else if (Subtarget->hasVFP4()) @@ -821,10 +821,10 @@ emitFPU = false; } - /* V8FP + .fpu */ - if (Subtarget->hasV8FP()) { + /* FPARMv8 + .fpu */ + if (Subtarget->hasFPARMv8()) { AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch, - ARMBuildAttrs::AllowV8FPA); + ARMBuildAttrs::AllowFPARMv8A); if (emitFPU) AttrEmitter->EmitTextAttribute(ARMBuildAttrs::VFP_arch, "fp-armv8"); /* VFPv4 + .fpu */ Index: lib/Target/ARM/ARMBuildAttrs.h =================================================================== --- lib/Target/ARM/ARMBuildAttrs.h +++ lib/Target/ARM/ARMBuildAttrs.h @@ -114,8 +114,8 @@ AllowFPv3B = 4, // v3 FP ISA permitted, but only D0-D15, S0-S31 AllowFPv4A = 5, // v4 FP ISA permitted (implies use of v3 FP ISA) AllowFPv4B = 6, // v4 FP ISA was permitted, but only D0-D15, S0-S31 - AllowV8FPA = 7, // Use of the ARM v8-A FP ISA was permitted - AllowV8FPB = 8, // Use of the ARM v8-A FP ISA was permitted, but only D0-D15, S0-S31 + AllowFPARMv8A = 7, // Use of the ARM 
v8-A FP ISA was permitted + AllowFPARMv8B = 8, // Use of the ARM v8-A FP ISA was permitted, but only D0-D15, S0-S31 // Tag_WMMX_arch, (=11), uleb128 AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions) Index: lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- lib/Target/ARM/ARMISelLowering.cpp +++ lib/Target/ARM/ARMISelLowering.cpp @@ -3258,7 +3258,7 @@ // inverting the compare condition, swapping 'less' and 'greater') and // sometimes need to swap the operands to the VSEL (which inverts the // condition in the sense of firing whenever the previous condition didn't) - if (getSubtarget()->hasV8FP() && (TrueVal.getValueType() == MVT::f32 || + if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || TrueVal.getValueType() == MVT::f64)) { ARMCC::CondCodes CondCode = IntCCToARMCC(CC); if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || @@ -3279,7 +3279,7 @@ FPCCToARMCC(CC, CondCode, CondCode2); // Try to generate VSEL on ARMv8. 
- if (getSubtarget()->hasV8FP() && (TrueVal.getValueType() == MVT::f32 || + if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || TrueVal.getValueType() == MVT::f64)) { // We can select VMAXNM/VMINNM from a compare followed by a select with the // same operands, as follows: Index: lib/Target/ARM/ARMInstrInfo.td =================================================================== --- lib/Target/ARM/ARMInstrInfo.td +++ lib/Target/ARM/ARMInstrInfo.td @@ -208,8 +208,8 @@ AssemblerPredicate<"FeatureVFP3", "VFP3">; def HasVFP4 : Predicate<"Subtarget->hasVFP4()">, AssemblerPredicate<"FeatureVFP4", "VFP4">; -def HasV8FP : Predicate<"Subtarget->hasV8FP()">, - AssemblerPredicate<"FeatureV8FP", "V8FP">; +def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">, + AssemblerPredicate<"FeatureFPARMv8", "FPARMv8">; def HasNEON : Predicate<"Subtarget->hasNEON()">, AssemblerPredicate<"FeatureNEON", "NEON">; def HasFP16 : Predicate<"Subtarget->hasFP16()">, Index: lib/Target/ARM/ARMInstrVFP.td =================================================================== --- lib/Target/ARM/ARMInstrVFP.td +++ lib/Target/ARM/ARMInstrVFP.td @@ -340,13 +340,13 @@ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm), NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"), [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>, - Requires<[HasV8FP]>; + Requires<[HasFPARMv8]>; def D : ADbInp<0b11100, opc, 0, (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm), NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"), [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>, - Requires<[HasV8FP]>; + Requires<[HasFPARMv8]>; } } @@ -362,13 +362,13 @@ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm), NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"), [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>, - Requires<[HasV8FP]>; + Requires<[HasFPARMv8]>; def D : ADbInp<0b11101, 0b00, opc, (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm), NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"), [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 
DPR:$Dm))))]>, - Requires<[HasV8FP]>; + Requires<[HasFPARMv8]>; } } @@ -538,7 +538,7 @@ def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs DPR:$Dd), (ins SPR:$Sm), NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm", - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { // Instruction operands. bits<5> Sm; @@ -550,7 +550,7 @@ def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins DPR:$Dm), NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm", - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { // Instruction operands. bits<5> Sd; bits<5> Dm; @@ -565,7 +565,7 @@ def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs DPR:$Dd), (ins SPR:$Sm), NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm", - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { // Instruction operands. bits<5> Sm; @@ -577,7 +577,7 @@ def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins DPR:$Dm), NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm", - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { // Instruction operands. 
bits<5> Sd; bits<5> Dm; @@ -594,21 +594,21 @@ def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm), NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"), - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { let Inst{17-16} = rm; } def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm), NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"), - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { let Inst{17-16} = rm; } def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0, (outs SPR:$Sd), (ins DPR:$Dm), NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"), - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { bits<5> Dm; let Inst{17-16} = rm; @@ -622,7 +622,7 @@ def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0, (outs SPR:$Sd), (ins DPR:$Dm), NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"), - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { bits<5> Dm; let Inst{17-16} = rm; @@ -658,14 +658,14 @@ def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm), NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm", - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { let Inst{7} = op2; let Inst{16} = op; } def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0, (outs DPR:$Dd), (ins DPR:$Dm), NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm", - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { let Inst{7} = op2; let Inst{16} = op; } @@ -685,13 +685,13 @@ def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm), NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"), - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { let Inst{17-16} = rm; } def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0, (outs DPR:$Dd), (ins DPR:$Dm), NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"), - []>, Requires<[HasV8FP]> { + []>, Requires<[HasFPARMv8]> { let Inst{17-16} = rm; } } Index: lib/Target/ARM/ARMSubtarget.h 
=================================================================== --- lib/Target/ARM/ARMSubtarget.h +++ lib/Target/ARM/ARMSubtarget.h @@ -48,12 +48,12 @@ bool HasV7Ops; bool HasV8Ops; - /// HasVFPv2, HasVFPv3, HasVFPv4, HasV8FP, HasNEON - Specify what + /// HasVFPv2, HasVFPv3, HasVFPv4, HasFPARMv8, HasNEON - Specify what /// floating point ISAs are supported. bool HasVFPv2; bool HasVFPv3; bool HasVFPv4; - bool HasV8FP; + bool HasFPARMv8; bool HasNEON; /// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been @@ -246,7 +246,7 @@ bool hasVFP2() const { return HasVFPv2; } bool hasVFP3() const { return HasVFPv3; } bool hasVFP4() const { return HasVFPv4; } - bool hasV8FP() const { return HasV8FP; } + bool hasFPARMv8() const { return HasFPARMv8; } bool hasNEON() const { return HasNEON; } bool useNEONForSinglePrecisionFP() const { return hasNEON() && UseNEONForSinglePrecisionFP; } Index: lib/Target/ARM/ARMSubtarget.cpp =================================================================== --- lib/Target/ARM/ARMSubtarget.cpp +++ lib/Target/ARM/ARMSubtarget.cpp @@ -81,7 +81,7 @@ HasVFPv2 = false; HasVFPv3 = false; HasVFPv4 = false; - HasV8FP = false; + HasFPARMv8 = false; HasNEON = false; UseNEONForSinglePrecisionFP = false; UseMulOps = UseFusedMulOps; Index: test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll =================================================================== --- test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll +++ test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll @@ -1,9 +1,9 @@ ; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7 ; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=+v8fp | FileCheck %s --check-prefix=V8-V8FP +; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=+fp-armv8 | FileCheck %s --check-prefix=V8-FPARMv8 ; RUN: llc < %s 
-mtriple=armv8-linux-gnueabi -mattr=+neon | FileCheck %s --check-prefix=V8-NEON -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=+v8fp -mattr=+neon | FileCheck %s --check-prefix=V8-V8FP-NEON +; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=+fp-armv8 -mattr=+neon | FileCheck %s --check-prefix=V8-FPARMv8-NEON ; This tests that MC/asm header conversion is smooth ; ; V7: .syntax unified @@ -20,19 +20,19 @@ ; Vt8: .syntax unified ; Vt8: .eabi_attribute 6, 14 -; V8-V8FP: .syntax unified -; V8-V8FP: .eabi_attribute 6, 14 -; V8-V8FP: .eabi_attribute 10, 7 -; V8-V8FP: .fpu fp-armv8 +; V8-FPARMv8: .syntax unified +; V8-FPARMv8: .eabi_attribute 6, 14 +; V8-FPARMv8: .eabi_attribute 10, 7 +; V8-FPARMv8: .fpu fp-armv8 ; V8-NEON: .syntax unified ; V8-NEON: .eabi_attribute 6, 14 ; V8-NEON: .eabi_attribute 12, 3 -; V8-V8FP-NEON: .syntax unified -; V8-V8FP-NEON: .eabi_attribute 6, 14 -; V8-V8FP-NEON: .fpu neon-fp-armv8 -; V8-V8FP-NEON: .eabi_attribute 10, 7 +; V8-FPARMv8-NEON: .syntax unified +; V8-FPARMv8-NEON: .eabi_attribute 6, 14 +; V8-FPARMv8-NEON: .fpu neon-fp-armv8 +; V8-FPARMv8-NEON: .eabi_attribute 10, 7 define i32 @f(i64 %z) { ret i32 0 Index: test/CodeGen/ARM/vminmaxnm.ll =================================================================== --- test/CodeGen/ARM/vminmaxnm.ll +++ test/CodeGen/ARM/vminmaxnm.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -mtriple armv8 -mattr=+neon | FileCheck %s -; RUN: llc < %s -mtriple armv8 -mattr=+neon,+v8fp -enable-unsafe-fp-math | FileCheck %s --check-prefix=CHECK-FAST +; RUN: llc < %s -mtriple armv8 -mattr=+neon,+fp-armv8 -enable-unsafe-fp-math | FileCheck %s --check-prefix=CHECK-FAST define <4 x float> @vmaxnmq(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK: vmaxnmq @@ -37,44 +37,44 @@ ret <2 x float> %tmp3 } -define float @v8fp_vminnm_o(float %a, float %b) { -; CHECK-FAST: v8fp_vminnm_o +define float @fp-armv8_vminnm_o(float %a, float %b) { +; CHECK-FAST: fp-armv8_vminnm_o ; CHECK-FAST-NOT: vcmp ; CHECK-FAST: vminnm.f32 -; CHECK: 
v8fp_vminnm_o +; CHECK: fp-armv8_vminnm_o ; CHECK-NOT: vminnm.f32 %cmp = fcmp olt float %a, %b %cond = select i1 %cmp, float %a, float %b ret float %cond } -define float @v8fp_vminnm_u(float %a, float %b) { -; CHECK-FAST: v8fp_vminnm_u +define float @fp-armv8_vminnm_u(float %a, float %b) { +; CHECK-FAST: fp-armv8_vminnm_u ; CHECK-FAST-NOT: vcmp ; CHECK-FAST: vminnm.f32 -; CHECK: v8fp_vminnm_u +; CHECK: fp-armv8_vminnm_u ; CHECK-NOT: vminnm.f32 %cmp = fcmp ult float %a, %b %cond = select i1 %cmp, float %a, float %b ret float %cond } -define float @v8fp_vmaxnm_o(float %a, float %b) { -; CHECK-FAST: v8fp_vmaxnm_o +define float @fp-armv8_vmaxnm_o(float %a, float %b) { +; CHECK-FAST: fp-armv8_vmaxnm_o ; CHECK-FAST-NOT: vcmp ; CHECK-FAST: vmaxnm.f32 -; CHECK: v8fp_vmaxnm_o +; CHECK: fp-armv8_vmaxnm_o ; CHECK-NOT: vmaxnm.f32 %cmp = fcmp ogt float %a, %b %cond = select i1 %cmp, float %a, float %b ret float %cond } -define float @v8fp_vmaxnm_u(float %a, float %b) { -; CHECK-FAST: v8fp_vmaxnm_u +define float @fp-armv8_vmaxnm_u(float %a, float %b) { +; CHECK-FAST: fp-armv8_vmaxnm_u ; CHECK-FAST-NOT: vcmp ; CHECK-FAST: vmaxnm.f32 -; CHECK: v8fp_vmaxnm_u +; CHECK: fp-armv8_vmaxnm_u ; CHECK-NOT: vmaxnm.f32 %cmp = fcmp ugt float %a, %b %cond = select i1 %cmp, float %a, float %b Index: test/CodeGen/ARM/vsel.ll =================================================================== --- test/CodeGen/ARM/vsel.ll +++ test/CodeGen/ARM/vsel.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=armv8-linux-gnueabihf -mattr=+v8fp -float-abi=hard | FileCheck %s +; RUN: llc < %s -mtriple=armv8-linux-gnueabihf -mattr=+fp-armv8 -float-abi=hard | FileCheck %s @varfloat = global float 0.0 @vardouble = global double 0.0 define void @test_vsel32sgt(i32 %lhs32, i32 %rhs32, float %a, float %b) { Index: test/MC/ARM/fp-armv8.s =================================================================== --- /dev/null +++ test/MC/ARM/fp-armv8.s @@ -0,0 +1,124 @@ +@ RUN: llvm-mc -triple armv8 -mattr=+fp-armv8 -show-encoding 
< %s | FileCheck %s + +@ VCVT{B,T} + + vcvtt.f64.f16 d3, s1 +@ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee] + vcvtt.f16.f64 s5, d12 +@ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee] + + vcvtb.f64.f16 d3, s1 +@ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0x60,0x3b,0xb2,0xee] + vcvtb.f16.f64 s4, d1 +@ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0x41,0x2b,0xb3,0xee] + + vcvttge.f64.f16 d3, s1 +@ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xae] + vcvttgt.f16.f64 s5, d12 +@ CHECK: vcvttgt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xce] + + vcvtbeq.f64.f16 d3, s1 +@ CHECK: vcvtbeq.f64.f16 d3, s1 @ encoding: [0x60,0x3b,0xb2,0x0e] + vcvtblt.f16.f64 s4, d1 +@ CHECK: vcvtblt.f16.f64 s4, d1 @ encoding: [0x41,0x2b,0xb3,0xbe] + + +@ VCVT{A,N,P,M} + + vcvta.s32.f32 s2, s3 +@ CHECK: vcvta.s32.f32 s2, s3 @ encoding: [0xe1,0x1a,0xbc,0xfe] + vcvta.s32.f64 s2, d3 +@ CHECK: vcvta.s32.f64 s2, d3 @ encoding: [0xc3,0x1b,0xbc,0xfe] + vcvtn.s32.f32 s6, s23 +@ CHECK: vcvtn.s32.f32 s6, s23 @ encoding: [0xeb,0x3a,0xbd,0xfe] + vcvtn.s32.f64 s6, d23 +@ CHECK: vcvtn.s32.f64 s6, d23 @ encoding: [0xe7,0x3b,0xbd,0xfe] + vcvtp.s32.f32 s0, s4 +@ CHECK: vcvtp.s32.f32 s0, s4 @ encoding: [0xc2,0x0a,0xbe,0xfe] + vcvtp.s32.f64 s0, d4 +@ CHECK: vcvtp.s32.f64 s0, d4 @ encoding: [0xc4,0x0b,0xbe,0xfe] + vcvtm.s32.f32 s17, s8 +@ CHECK: vcvtm.s32.f32 s17, s8 @ encoding: [0xc4,0x8a,0xff,0xfe] + vcvtm.s32.f64 s17, d8 +@ CHECK: vcvtm.s32.f64 s17, d8 @ encoding: [0xc8,0x8b,0xff,0xfe] + + vcvta.u32.f32 s2, s3 +@ CHECK: vcvta.u32.f32 s2, s3 @ encoding: [0x61,0x1a,0xbc,0xfe] + vcvta.u32.f64 s2, d3 +@ CHECK: vcvta.u32.f64 s2, d3 @ encoding: [0x43,0x1b,0xbc,0xfe] + vcvtn.u32.f32 s6, s23 +@ CHECK: vcvtn.u32.f32 s6, s23 @ encoding: [0x6b,0x3a,0xbd,0xfe] + vcvtn.u32.f64 s6, d23 +@ CHECK: vcvtn.u32.f64 s6, d23 @ encoding: [0x67,0x3b,0xbd,0xfe] + vcvtp.u32.f32 s0, s4 +@ CHECK: vcvtp.u32.f32 s0, s4 @ encoding: [0x42,0x0a,0xbe,0xfe] + vcvtp.u32.f64 s0, d4 +@ CHECK: vcvtp.u32.f64 s0, 
d4 @ encoding: [0x44,0x0b,0xbe,0xfe] + vcvtm.u32.f32 s17, s8 +@ CHECK: vcvtm.u32.f32 s17, s8 @ encoding: [0x44,0x8a,0xff,0xfe] + vcvtm.u32.f64 s17, d8 +@ CHECK: vcvtm.u32.f64 s17, d8 @ encoding: [0x48,0x8b,0xff,0xfe] + + +@ VSEL + vselge.f32 s4, s1, s23 +@ CHECK: vselge.f32 s4, s1, s23 @ encoding: [0xab,0x2a,0x20,0xfe] + vselge.f64 d30, d31, d23 +@ CHECK: vselge.f64 d30, d31, d23 @ encoding: [0xa7,0xeb,0x6f,0xfe] + vselgt.f32 s0, s1, s0 +@ CHECK: vselgt.f32 s0, s1, s0 @ encoding: [0x80,0x0a,0x30,0xfe] + vselgt.f64 d5, d10, d20 +@ CHECK: vselgt.f64 d5, d10, d20 @ encoding: [0x24,0x5b,0x3a,0xfe] + vseleq.f32 s30, s28, s23 +@ CHECK: vseleq.f32 s30, s28, s23 @ encoding: [0x2b,0xfa,0x0e,0xfe] + vseleq.f64 d2, d4, d8 +@ CHECK: vseleq.f64 d2, d4, d8 @ encoding: [0x08,0x2b,0x04,0xfe] + vselvs.f32 s21, s16, s14 +@ CHECK: vselvs.f32 s21, s16, s14 @ encoding: [0x07,0xaa,0x58,0xfe] + vselvs.f64 d0, d1, d31 +@ CHECK: vselvs.f64 d0, d1, d31 @ encoding: [0x2f,0x0b,0x11,0xfe] + + +@ VMAXNM / VMINNM + vmaxnm.f32 s5, s12, s0 +@ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0x00,0x2a,0xc6,0xfe] + vmaxnm.f64 d5, d22, d30 +@ CHECK: vmaxnm.f64 d5, d22, d30 @ encoding: [0xae,0x5b,0x86,0xfe] + vminnm.f32 s0, s0, s12 +@ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x46,0x0a,0x80,0xfe] + vminnm.f64 d4, d6, d9 +@ CHECK: vminnm.f64 d4, d6, d9 @ encoding: [0x49,0x4b,0x86,0xfe] + +@ VRINT{Z,R,X} + + vrintzge.f64 d3, d12 +@ CHECK: vrintzge.f64 d3, d12 @ encoding: [0xcc,0x3b,0xb6,0xae] + vrintz.f32 s3, s24 +@ CHECK: vrintz.f32 s3, s24 @ encoding: [0xcc,0x1a,0xf6,0xee] + vrintrlt.f64 d5, d0 +@ CHECK: vrintrlt.f64 d5, d0 @ encoding: [0x40,0x5b,0xb6,0xbe] + vrintr.f32 s0, s9 +@ CHECK: vrintr.f32 s0, s9 @ encoding: [0x64,0x0a,0xb6,0xee] + vrintxeq.f64 d28, d30 +@ CHECK: vrintxeq.f64 d28, d30 @ encoding: [0x6e,0xcb,0xf7,0x0e] + vrintxvs.f32 s10, s14 +@ CHECK: vrintxvs.f32 s10, s14 @ encoding: [0x47,0x5a,0xb7,0x6e] + +@ VRINT{A,N,P,M} + + vrinta.f64 d3, d4 +@ CHECK: vrinta.f64 d3, d4 @ encoding: 
[0x44,0x3b,0xb8,0xfe] + vrinta.f32 s12, s1 +@ CHECK: vrinta.f32 s12, s1 @ encoding: [0x60,0x6a,0xb8,0xfe] + vrintn.f64 d3, d4 +@ CHECK: vrintn.f64 d3, d4 @ encoding: [0x44,0x3b,0xb9,0xfe] + vrintn.f32 s12, s1 +@ CHECK: vrintn.f32 s12, s1 @ encoding: [0x60,0x6a,0xb9,0xfe] + vrintp.f64 d3, d4 +@ CHECK: vrintp.f64 d3, d4 @ encoding: [0x44,0x3b,0xba,0xfe] + vrintp.f32 s12, s1 +@ CHECK: vrintp.f32 s12, s1 @ encoding: [0x60,0x6a,0xba,0xfe] + vrintm.f64 d3, d4 +@ CHECK: vrintm.f64 d3, d4 @ encoding: [0x44,0x3b,0xbb,0xfe] + vrintm.f32 s12, s1 +@ CHECK: vrintm.f32 s12, s1 @ encoding: [0x60,0x6a,0xbb,0xfe] Index: test/MC/ARM/invalid-fp-armv8.s =================================================================== --- /dev/null +++ test/MC/ARM/invalid-fp-armv8.s @@ -0,0 +1,89 @@ +@ RUN: not llvm-mc -triple armv8 -show-encoding -mattr=+fp-armv8 < %s 2>&1 | FileCheck %s --check-prefix=V8 + +@ VCVT{B,T} + + vcvtt.f64.f16 d3, s1 +@ V7-NOT: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee] + vcvtt.f16.f64 s5, d12 +@ V7-NOT: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee] + + vsel.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselne.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselmi.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselpl.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselvc.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselcs.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselcc.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselhs.f32 s3, s4, s6 +@ V8: error: invalid instruction + vsello.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselhi.f32 s3, s4, s6 +@ V8: error: invalid instruction + vsells.f32 s3, s4, s6 +@ V8: error: invalid instruction + vsellt.f32 s3, s4, s6 +@ V8: error: invalid instruction + vselle.f32 s3, s4, s6 +@ V8: error: invalid instruction + +vseleq.f32 s0, d2, d1 +@ V8: error: invalid operand for instruction +vselgt.f64 s3, s2, s1 +@ V8: error: invalid operand for instruction +vselgt.f32 s0, q3, q1 
+@ V8: error: invalid operand for instruction +vselgt.f64 q0, s3, q1 +@ V8: error: invalid operand for instruction + +vmaxnm.f32 s0, d2, d1 +@ V8: error: invalid operand for instruction +vminnm.f64 s3, s2, s1 +@ V8: error: invalid operand for instruction +vmaxnm.f32 s0, q3, q1 +@ V8: error: invalid operand for instruction +vmaxnm.f64 q0, s3, q1 +@ V8: error: invalid operand for instruction +vmaxnmgt.f64 q0, s3, q1 +@ V8: error: instruction 'vmaxnm' is not predicable, but condition code specified + +vcvta.s32.f64 d3, s2 +@ V8: error: invalid operand for instruction +vcvtp.s32.f32 d3, s2 +@ V8: error: invalid operand for instruction +vcvtn.u32.f64 d3, s2 +@ V8: error: invalid operand for instruction +vcvtm.u32.f32 d3, s2 +@ V8: error: invalid operand for instruction +vcvtnge.u32.f64 d3, s2 +@ V8: error: instruction 'vcvtn' is not predicable, but condition code specified + +vcvtbgt.f64.f16 q0, d3 +@ V8: error: invalid operand for instruction +vcvttlt.f64.f16 s0, s3 +@ V8: error: invalid operand for instruction +vcvttvs.f16.f64 s0, s3 +@ V8: error: invalid operand for instruction +vcvtthi.f16.f64 q0, d3 +@ V8: error: invalid operand for instruction + +vrintrlo.f32.f32 d3, q0 +@ V8: error: invalid operand for instruction +vrintxcs.f32.f32 d3, d0 +@ V8: error: instruction requires: NEON + +vrinta.f64.f64 s3, q0 +@ V8: error: invalid operand for instruction +vrintn.f32.f32 d3, d0 +@ V8: error: instruction requires: NEON +vrintp.f32 q3, q0 +@ V8: error: instruction requires: NEON +vrintmlt.f32 q3, q0 +@ V8: error: instruction 'vrintm' is not predicable, but condition code specified Index: test/MC/ARM/invalid-neon-v8.s =================================================================== --- test/MC/ARM/invalid-neon-v8.s +++ test/MC/ARM/invalid-neon-v8.s @@ -8,7 +8,7 @@ @ CHECK: error: instruction 'vmaxnm' is not predicable, but condition code specified vcvta.s32.f32 s1, s2 -@ CHECK: error: instruction requires: V8FP +@ CHECK: error: instruction requires: FPARMv8 
vcvtp.u32.f32 s1, d2 @ CHECK: error: invalid operand for instruction vcvtp.f32.u32 d1, q2 Index: test/MC/ARM/invalid-v8fp.s =================================================================== --- test/MC/ARM/invalid-v8fp.s +++ /dev/null @@ -1,89 +0,0 @@ -@ RUN: not llvm-mc -triple armv8 -show-encoding -mattr=+v8fp < %s 2>&1 | FileCheck %s --check-prefix=V8 - -@ VCVT{B,T} - - vcvtt.f64.f16 d3, s1 -@ V7-NOT: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee] - vcvtt.f16.f64 s5, d12 -@ V7-NOT: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee] - - vsel.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselne.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselmi.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselpl.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselvc.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselcs.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselcc.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselhs.f32 s3, s4, s6 -@ V8: error: invalid instruction - vsello.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselhi.f32 s3, s4, s6 -@ V8: error: invalid instruction - vsells.f32 s3, s4, s6 -@ V8: error: invalid instruction - vsellt.f32 s3, s4, s6 -@ V8: error: invalid instruction - vselle.f32 s3, s4, s6 -@ V8: error: invalid instruction - -vseleq.f32 s0, d2, d1 -@ V8: error: invalid operand for instruction -vselgt.f64 s3, s2, s1 -@ V8: error: invalid operand for instruction -vselgt.f32 s0, q3, q1 -@ V8: error: invalid operand for instruction -vselgt.f64 q0, s3, q1 -@ V8: error: invalid operand for instruction - -vmaxnm.f32 s0, d2, d1 -@ V8: error: invalid operand for instruction -vminnm.f64 s3, s2, s1 -@ V8: error: invalid operand for instruction -vmaxnm.f32 s0, q3, q1 -@ V8: error: invalid operand for instruction -vmaxnm.f64 q0, s3, q1 -@ V8: error: invalid operand for instruction -vmaxnmgt.f64 q0, s3, q1 -@ CHECK: error: instruction 'vmaxnm' is not predicable, but condition code specified - -vcvta.s32.f64 d3, 
s2 -@ V8: error: invalid operand for instruction -vcvtp.s32.f32 d3, s2 -@ V8: error: invalid operand for instruction -vcvtn.u32.f64 d3, s2 -@ V8: error: invalid operand for instruction -vcvtm.u32.f32 d3, s2 -@ V8: error: invalid operand for instruction -vcvtnge.u32.f64 d3, s2 -@ V8: error: instruction 'vcvtn' is not predicable, but condition code specified - -vcvtbgt.f64.f16 q0, d3 -@ V8: error: invalid operand for instruction -vcvttlt.f64.f16 s0, s3 -@ V8: error: invalid operand for instruction -vcvttvs.f16.f64 s0, s3 -@ V8: error: invalid operand for instruction -vcvtthi.f16.f64 q0, d3 -@ V8: error: invalid operand for instruction - -vrintrlo.f32.f32 d3, q0 -@ V8: error: invalid operand for instruction -vrintxcs.f32.f32 d3, d0 -@ V8: error: instruction requires: NEON - -vrinta.f64.f64 s3, q0 -@ V8: error: invalid operand for instruction -vrintn.f32.f32 d3, d0 -@ V8: error: instruction requires: NEON -vrintp.f32 q3, q0 -@ V8: error: instruction requires: NEON -vrintmlt.f32 q3, q0 -@ V8: error: instruction 'vrintm' is not predicable, but condition code specified Index: test/MC/ARM/thumb-fp-armv8.s =================================================================== --- /dev/null +++ test/MC/ARM/thumb-fp-armv8.s @@ -0,0 +1,130 @@ +@ RUN: llvm-mc -triple thumbv8 -mattr=+fp-armv8 -show-encoding < %s | FileCheck %s + +@ VCVT{B,T} + + vcvtt.f64.f16 d3, s1 +@ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b] + vcvtt.f16.f64 s5, d12 +@ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xf3,0xee,0xcc,0x2b] + + vcvtb.f64.f16 d3, s1 +@ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0x60,0x3b] + vcvtb.f16.f64 s4, d1 +@ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0xb3,0xee,0x41,0x2b] + + it ge + vcvttge.f64.f16 d3, s1 +@ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b] + it gt + vcvttgt.f16.f64 s5, d12 +@ CHECK: vcvttgt.f16.f64 s5, d12 @ encoding: [0xf3,0xee,0xcc,0x2b] + it eq + vcvtbeq.f64.f16 d3, s1 +@ CHECK: vcvtbeq.f64.f16 d3, s1 @ encoding: 
[0xb2,0xee,0x60,0x3b] + it lt + vcvtblt.f16.f64 s4, d1 +@ CHECK: vcvtblt.f16.f64 s4, d1 @ encoding: [0xb3,0xee,0x41,0x2b] + + +@ VCVT{A,N,P,M} + + vcvta.s32.f32 s2, s3 +@ CHECK: vcvta.s32.f32 s2, s3 @ encoding: [0xbc,0xfe,0xe1,0x1a] + vcvta.s32.f64 s2, d3 +@ CHECK: vcvta.s32.f64 s2, d3 @ encoding: [0xbc,0xfe,0xc3,0x1b] + vcvtn.s32.f32 s6, s23 +@ CHECK: vcvtn.s32.f32 s6, s23 @ encoding: [0xbd,0xfe,0xeb,0x3a] + vcvtn.s32.f64 s6, d23 +@ CHECK: vcvtn.s32.f64 s6, d23 @ encoding: [0xbd,0xfe,0xe7,0x3b] + vcvtp.s32.f32 s0, s4 +@ CHECK: vcvtp.s32.f32 s0, s4 @ encoding: [0xbe,0xfe,0xc2,0x0a] + vcvtp.s32.f64 s0, d4 +@ CHECK: vcvtp.s32.f64 s0, d4 @ encoding: [0xbe,0xfe,0xc4,0x0b] + vcvtm.s32.f32 s17, s8 +@ CHECK: vcvtm.s32.f32 s17, s8 @ encoding: [0xff,0xfe,0xc4,0x8a] + vcvtm.s32.f64 s17, d8 +@ CHECK: vcvtm.s32.f64 s17, d8 @ encoding: [0xff,0xfe,0xc8,0x8b] + + vcvta.u32.f32 s2, s3 +@ CHECK: vcvta.u32.f32 s2, s3 @ encoding: [0xbc,0xfe,0x61,0x1a] + vcvta.u32.f64 s2, d3 +@ CHECK: vcvta.u32.f64 s2, d3 @ encoding: [0xbc,0xfe,0x43,0x1b] + vcvtn.u32.f32 s6, s23 +@ CHECK: vcvtn.u32.f32 s6, s23 @ encoding: [0xbd,0xfe,0x6b,0x3a] + vcvtn.u32.f64 s6, d23 +@ CHECK: vcvtn.u32.f64 s6, d23 @ encoding: [0xbd,0xfe,0x67,0x3b] + vcvtp.u32.f32 s0, s4 +@ CHECK: vcvtp.u32.f32 s0, s4 @ encoding: [0xbe,0xfe,0x42,0x0a] + vcvtp.u32.f64 s0, d4 +@ CHECK: vcvtp.u32.f64 s0, d4 @ encoding: [0xbe,0xfe,0x44,0x0b] + vcvtm.u32.f32 s17, s8 +@ CHECK: vcvtm.u32.f32 s17, s8 @ encoding: [0xff,0xfe,0x44,0x8a] + vcvtm.u32.f64 s17, d8 +@ CHECK: vcvtm.u32.f64 s17, d8 @ encoding: [0xff,0xfe,0x48,0x8b] + + +@ VSEL + vselge.f32 s4, s1, s23 +@ CHECK: vselge.f32 s4, s1, s23 @ encoding: [0x20,0xfe,0xab,0x2a] + vselge.f64 d30, d31, d23 +@ CHECK: vselge.f64 d30, d31, d23 @ encoding: [0x6f,0xfe,0xa7,0xeb] + vselgt.f32 s0, s1, s0 +@ CHECK: vselgt.f32 s0, s1, s0 @ encoding: [0x30,0xfe,0x80,0x0a] + vselgt.f64 d5, d10, d20 +@ CHECK: vselgt.f64 d5, d10, d20 @ encoding: [0x3a,0xfe,0x24,0x5b] + vseleq.f32 s30, s28, s23 +@ CHECK: 
vseleq.f32 s30, s28, s23 @ encoding: [0x0e,0xfe,0x2b,0xfa] + vseleq.f64 d2, d4, d8 +@ CHECK: vseleq.f64 d2, d4, d8 @ encoding: [0x04,0xfe,0x08,0x2b] + vselvs.f32 s21, s16, s14 +@ CHECK: vselvs.f32 s21, s16, s14 @ encoding: [0x58,0xfe,0x07,0xaa] + vselvs.f64 d0, d1, d31 +@ CHECK: vselvs.f64 d0, d1, d31 @ encoding: [0x11,0xfe,0x2f,0x0b] + + +@ VMAXNM / VMINNM + vmaxnm.f32 s5, s12, s0 +@ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0xc6,0xfe,0x00,0x2a] + vmaxnm.f64 d5, d22, d30 +@ CHECK: vmaxnm.f64 d5, d22, d30 @ encoding: [0x86,0xfe,0xae,0x5b] + vminnm.f32 s0, s0, s12 +@ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x0a] + vminnm.f64 d4, d6, d9 +@ CHECK: vminnm.f64 d4, d6, d9 @ encoding: [0x86,0xfe,0x49,0x4b] + +@ VRINT{Z,R,X} + it ge + vrintzge.f64 d3, d12 +@ CHECK: vrintzge.f64 d3, d12 @ encoding: [0xb6,0xee,0xcc,0x3b] + vrintz.f32 s3, s24 +@ CHECK: vrintz.f32 s3, s24 @ encoding: [0xf6,0xee,0xcc,0x1a] + it lt + vrintrlt.f64 d5, d0 +@ CHECK: vrintrlt.f64 d5, d0 @ encoding: [0xb6,0xee,0x40,0x5b] + vrintr.f32 s0, s9 +@ CHECK: vrintr.f32 s0, s9 @ encoding: [0xb6,0xee,0x64,0x0a] + it eq + vrintxeq.f64 d28, d30 +@ CHECK: vrintxeq.f64 d28, d30 @ encoding: [0xf7,0xee,0x6e,0xcb] + it vs + vrintxvs.f32 s10, s14 +@ CHECK: vrintxvs.f32 s10, s14 @ encoding: [0xb7,0xee,0x47,0x5a] + +@ VRINT{A,N,P,M} + + vrinta.f64 d3, d4 +@ CHECK: vrinta.f64 d3, d4 @ encoding: [0xb8,0xfe,0x44,0x3b] + vrinta.f32 s12, s1 +@ CHECK: vrinta.f32 s12, s1 @ encoding: [0xb8,0xfe,0x60,0x6a] + vrintn.f64 d3, d4 +@ CHECK: vrintn.f64 d3, d4 @ encoding: [0xb9,0xfe,0x44,0x3b] + vrintn.f32 s12, s1 +@ CHECK: vrintn.f32 s12, s1 @ encoding: [0xb9,0xfe,0x60,0x6a] + vrintp.f64 d3, d4 +@ CHECK: vrintp.f64 d3, d4 @ encoding: [0xba,0xfe,0x44,0x3b] + vrintp.f32 s12, s1 +@ CHECK: vrintp.f32 s12, s1 @ encoding: [0xba,0xfe,0x60,0x6a] + vrintm.f64 d3, d4 +@ CHECK: vrintm.f64 d3, d4 @ encoding: [0xbb,0xfe,0x44,0x3b] + vrintm.f32 s12, s1 +@ CHECK: vrintm.f32 s12, s1 @ encoding: [0xbb,0xfe,0x60,0x6a] Index: 
test/MC/ARM/thumb-v8fp.s =================================================================== --- test/MC/ARM/thumb-v8fp.s +++ /dev/null @@ -1,130 +0,0 @@ -@ RUN: llvm-mc -triple thumbv8 -mattr=+v8fp -show-encoding < %s | FileCheck %s - -@ VCVT{B,T} - - vcvtt.f64.f16 d3, s1 -@ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b] - vcvtt.f16.f64 s5, d12 -@ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xf3,0xee,0xcc,0x2b] - - vcvtb.f64.f16 d3, s1 -@ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0x60,0x3b] - vcvtb.f16.f64 s4, d1 -@ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0xb3,0xee,0x41,0x2b] - - it ge - vcvttge.f64.f16 d3, s1 -@ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b] - it gt - vcvttgt.f16.f64 s5, d12 -@ CHECK: vcvttgt.f16.f64 s5, d12 @ encoding: [0xf3,0xee,0xcc,0x2b] - it eq - vcvtbeq.f64.f16 d3, s1 -@ CHECK: vcvtbeq.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0x60,0x3b] - it lt - vcvtblt.f16.f64 s4, d1 -@ CHECK: vcvtblt.f16.f64 s4, d1 @ encoding: [0xb3,0xee,0x41,0x2b] - - -@ VCVT{A,N,P,M} - - vcvta.s32.f32 s2, s3 -@ CHECK: vcvta.s32.f32 s2, s3 @ encoding: [0xbc,0xfe,0xe1,0x1a] - vcvta.s32.f64 s2, d3 -@ CHECK: vcvta.s32.f64 s2, d3 @ encoding: [0xbc,0xfe,0xc3,0x1b] - vcvtn.s32.f32 s6, s23 -@ CHECK: vcvtn.s32.f32 s6, s23 @ encoding: [0xbd,0xfe,0xeb,0x3a] - vcvtn.s32.f64 s6, d23 -@ CHECK: vcvtn.s32.f64 s6, d23 @ encoding: [0xbd,0xfe,0xe7,0x3b] - vcvtp.s32.f32 s0, s4 -@ CHECK: vcvtp.s32.f32 s0, s4 @ encoding: [0xbe,0xfe,0xc2,0x0a] - vcvtp.s32.f64 s0, d4 -@ CHECK: vcvtp.s32.f64 s0, d4 @ encoding: [0xbe,0xfe,0xc4,0x0b] - vcvtm.s32.f32 s17, s8 -@ CHECK: vcvtm.s32.f32 s17, s8 @ encoding: [0xff,0xfe,0xc4,0x8a] - vcvtm.s32.f64 s17, d8 -@ CHECK: vcvtm.s32.f64 s17, d8 @ encoding: [0xff,0xfe,0xc8,0x8b] - - vcvta.u32.f32 s2, s3 -@ CHECK: vcvta.u32.f32 s2, s3 @ encoding: [0xbc,0xfe,0x61,0x1a] - vcvta.u32.f64 s2, d3 -@ CHECK: vcvta.u32.f64 s2, d3 @ encoding: [0xbc,0xfe,0x43,0x1b] - vcvtn.u32.f32 s6, s23 -@ CHECK: vcvtn.u32.f32 s6, s23 @ encoding: 
[0xbd,0xfe,0x6b,0x3a] - vcvtn.u32.f64 s6, d23 -@ CHECK: vcvtn.u32.f64 s6, d23 @ encoding: [0xbd,0xfe,0x67,0x3b] - vcvtp.u32.f32 s0, s4 -@ CHECK: vcvtp.u32.f32 s0, s4 @ encoding: [0xbe,0xfe,0x42,0x0a] - vcvtp.u32.f64 s0, d4 -@ CHECK: vcvtp.u32.f64 s0, d4 @ encoding: [0xbe,0xfe,0x44,0x0b] - vcvtm.u32.f32 s17, s8 -@ CHECK: vcvtm.u32.f32 s17, s8 @ encoding: [0xff,0xfe,0x44,0x8a] - vcvtm.u32.f64 s17, d8 -@ CHECK: vcvtm.u32.f64 s17, d8 @ encoding: [0xff,0xfe,0x48,0x8b] - - -@ VSEL - vselge.f32 s4, s1, s23 -@ CHECK: vselge.f32 s4, s1, s23 @ encoding: [0x20,0xfe,0xab,0x2a] - vselge.f64 d30, d31, d23 -@ CHECK: vselge.f64 d30, d31, d23 @ encoding: [0x6f,0xfe,0xa7,0xeb] - vselgt.f32 s0, s1, s0 -@ CHECK: vselgt.f32 s0, s1, s0 @ encoding: [0x30,0xfe,0x80,0x0a] - vselgt.f64 d5, d10, d20 -@ CHECK: vselgt.f64 d5, d10, d20 @ encoding: [0x3a,0xfe,0x24,0x5b] - vseleq.f32 s30, s28, s23 -@ CHECK: vseleq.f32 s30, s28, s23 @ encoding: [0x0e,0xfe,0x2b,0xfa] - vseleq.f64 d2, d4, d8 -@ CHECK: vseleq.f64 d2, d4, d8 @ encoding: [0x04,0xfe,0x08,0x2b] - vselvs.f32 s21, s16, s14 -@ CHECK: vselvs.f32 s21, s16, s14 @ encoding: [0x58,0xfe,0x07,0xaa] - vselvs.f64 d0, d1, d31 -@ CHECK: vselvs.f64 d0, d1, d31 @ encoding: [0x11,0xfe,0x2f,0x0b] - - -@ VMAXNM / VMINNM - vmaxnm.f32 s5, s12, s0 -@ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0xc6,0xfe,0x00,0x2a] - vmaxnm.f64 d5, d22, d30 -@ CHECK: vmaxnm.f64 d5, d22, d30 @ encoding: [0x86,0xfe,0xae,0x5b] - vminnm.f32 s0, s0, s12 -@ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x0a] - vminnm.f64 d4, d6, d9 -@ CHECK: vminnm.f64 d4, d6, d9 @ encoding: [0x86,0xfe,0x49,0x4b] - -@ VRINT{Z,R,X} - it ge - vrintzge.f64 d3, d12 -@ CHECK: vrintzge.f64 d3, d12 @ encoding: [0xb6,0xee,0xcc,0x3b] - vrintz.f32 s3, s24 -@ CHECK: vrintz.f32 s3, s24 @ encoding: [0xf6,0xee,0xcc,0x1a] - it lt - vrintrlt.f64 d5, d0 -@ CHECK: vrintrlt.f64 d5, d0 @ encoding: [0xb6,0xee,0x40,0x5b] - vrintr.f32 s0, s9 -@ CHECK: vrintr.f32 s0, s9 @ encoding: [0xb6,0xee,0x64,0x0a] - it eq 
- vrintxeq.f64 d28, d30 -@ CHECK: vrintxeq.f64 d28, d30 @ encoding: [0xf7,0xee,0x6e,0xcb] - it vs - vrintxvs.f32 s10, s14 -@ CHECK: vrintxvs.f32 s10, s14 @ encoding: [0xb7,0xee,0x47,0x5a] - -@ VRINT{A,N,P,M} - - vrinta.f64 d3, d4 -@ CHECK: vrinta.f64 d3, d4 @ encoding: [0xb8,0xfe,0x44,0x3b] - vrinta.f32 s12, s1 -@ CHECK: vrinta.f32 s12, s1 @ encoding: [0xb8,0xfe,0x60,0x6a] - vrintn.f64 d3, d4 -@ CHECK: vrintn.f64 d3, d4 @ encoding: [0xb9,0xfe,0x44,0x3b] - vrintn.f32 s12, s1 -@ CHECK: vrintn.f32 s12, s1 @ encoding: [0xb9,0xfe,0x60,0x6a] - vrintp.f64 d3, d4 -@ CHECK: vrintp.f64 d3, d4 @ encoding: [0xba,0xfe,0x44,0x3b] - vrintp.f32 s12, s1 -@ CHECK: vrintp.f32 s12, s1 @ encoding: [0xba,0xfe,0x60,0x6a] - vrintm.f64 d3, d4 -@ CHECK: vrintm.f64 d3, d4 @ encoding: [0xbb,0xfe,0x44,0x3b] - vrintm.f32 s12, s1 -@ CHECK: vrintm.f32 s12, s1 @ encoding: [0xbb,0xfe,0x60,0x6a] Index: test/MC/ARM/v8fp.s =================================================================== --- test/MC/ARM/v8fp.s +++ /dev/null @@ -1,124 +0,0 @@ -@ RUN: llvm-mc -triple armv8 -mattr=+v8fp -show-encoding < %s | FileCheck %s - -@ VCVT{B,T} - - vcvtt.f64.f16 d3, s1 -@ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee] - vcvtt.f16.f64 s5, d12 -@ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee] - - vcvtb.f64.f16 d3, s1 -@ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0x60,0x3b,0xb2,0xee] - vcvtb.f16.f64 s4, d1 -@ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0x41,0x2b,0xb3,0xee] - - vcvttge.f64.f16 d3, s1 -@ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xae] - vcvttgt.f16.f64 s5, d12 -@ CHECK: vcvttgt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xce] - - vcvtbeq.f64.f16 d3, s1 -@ CHECK: vcvtbeq.f64.f16 d3, s1 @ encoding: [0x60,0x3b,0xb2,0x0e] - vcvtblt.f16.f64 s4, d1 -@ CHECK: vcvtblt.f16.f64 s4, d1 @ encoding: [0x41,0x2b,0xb3,0xbe] - - -@ VCVT{A,N,P,M} - - vcvta.s32.f32 s2, s3 -@ CHECK: vcvta.s32.f32 s2, s3 @ encoding: [0xe1,0x1a,0xbc,0xfe] - vcvta.s32.f64 s2, d3 -@ CHECK: 
vcvta.s32.f64 s2, d3 @ encoding: [0xc3,0x1b,0xbc,0xfe] - vcvtn.s32.f32 s6, s23 -@ CHECK: vcvtn.s32.f32 s6, s23 @ encoding: [0xeb,0x3a,0xbd,0xfe] - vcvtn.s32.f64 s6, d23 -@ CHECK: vcvtn.s32.f64 s6, d23 @ encoding: [0xe7,0x3b,0xbd,0xfe] - vcvtp.s32.f32 s0, s4 -@ CHECK: vcvtp.s32.f32 s0, s4 @ encoding: [0xc2,0x0a,0xbe,0xfe] - vcvtp.s32.f64 s0, d4 -@ CHECK: vcvtp.s32.f64 s0, d4 @ encoding: [0xc4,0x0b,0xbe,0xfe] - vcvtm.s32.f32 s17, s8 -@ CHECK: vcvtm.s32.f32 s17, s8 @ encoding: [0xc4,0x8a,0xff,0xfe] - vcvtm.s32.f64 s17, d8 -@ CHECK: vcvtm.s32.f64 s17, d8 @ encoding: [0xc8,0x8b,0xff,0xfe] - - vcvta.u32.f32 s2, s3 -@ CHECK: vcvta.u32.f32 s2, s3 @ encoding: [0x61,0x1a,0xbc,0xfe] - vcvta.u32.f64 s2, d3 -@ CHECK: vcvta.u32.f64 s2, d3 @ encoding: [0x43,0x1b,0xbc,0xfe] - vcvtn.u32.f32 s6, s23 -@ CHECK: vcvtn.u32.f32 s6, s23 @ encoding: [0x6b,0x3a,0xbd,0xfe] - vcvtn.u32.f64 s6, d23 -@ CHECK: vcvtn.u32.f64 s6, d23 @ encoding: [0x67,0x3b,0xbd,0xfe] - vcvtp.u32.f32 s0, s4 -@ CHECK: vcvtp.u32.f32 s0, s4 @ encoding: [0x42,0x0a,0xbe,0xfe] - vcvtp.u32.f64 s0, d4 -@ CHECK: vcvtp.u32.f64 s0, d4 @ encoding: [0x44,0x0b,0xbe,0xfe] - vcvtm.u32.f32 s17, s8 -@ CHECK: vcvtm.u32.f32 s17, s8 @ encoding: [0x44,0x8a,0xff,0xfe] - vcvtm.u32.f64 s17, d8 -@ CHECK: vcvtm.u32.f64 s17, d8 @ encoding: [0x48,0x8b,0xff,0xfe] - - -@ VSEL - vselge.f32 s4, s1, s23 -@ CHECK: vselge.f32 s4, s1, s23 @ encoding: [0xab,0x2a,0x20,0xfe] - vselge.f64 d30, d31, d23 -@ CHECK: vselge.f64 d30, d31, d23 @ encoding: [0xa7,0xeb,0x6f,0xfe] - vselgt.f32 s0, s1, s0 -@ CHECK: vselgt.f32 s0, s1, s0 @ encoding: [0x80,0x0a,0x30,0xfe] - vselgt.f64 d5, d10, d20 -@ CHECK: vselgt.f64 d5, d10, d20 @ encoding: [0x24,0x5b,0x3a,0xfe] - vseleq.f32 s30, s28, s23 -@ CHECK: vseleq.f32 s30, s28, s23 @ encoding: [0x2b,0xfa,0x0e,0xfe] - vseleq.f64 d2, d4, d8 -@ CHECK: vseleq.f64 d2, d4, d8 @ encoding: [0x08,0x2b,0x04,0xfe] - vselvs.f32 s21, s16, s14 -@ CHECK: vselvs.f32 s21, s16, s14 @ encoding: [0x07,0xaa,0x58,0xfe] - vselvs.f64 d0, d1, d31 -@ 
CHECK: vselvs.f64 d0, d1, d31 @ encoding: [0x2f,0x0b,0x11,0xfe] - - -@ VMAXNM / VMINNM - vmaxnm.f32 s5, s12, s0 -@ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0x00,0x2a,0xc6,0xfe] - vmaxnm.f64 d5, d22, d30 -@ CHECK: vmaxnm.f64 d5, d22, d30 @ encoding: [0xae,0x5b,0x86,0xfe] - vminnm.f32 s0, s0, s12 -@ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x46,0x0a,0x80,0xfe] - vminnm.f64 d4, d6, d9 -@ CHECK: vminnm.f64 d4, d6, d9 @ encoding: [0x49,0x4b,0x86,0xfe] - -@ VRINT{Z,R,X} - - vrintzge.f64 d3, d12 -@ CHECK: vrintzge.f64 d3, d12 @ encoding: [0xcc,0x3b,0xb6,0xae] - vrintz.f32 s3, s24 -@ CHECK: vrintz.f32 s3, s24 @ encoding: [0xcc,0x1a,0xf6,0xee] - vrintrlt.f64 d5, d0 -@ CHECK: vrintrlt.f64 d5, d0 @ encoding: [0x40,0x5b,0xb6,0xbe] - vrintr.f32 s0, s9 -@ CHECK: vrintr.f32 s0, s9 @ encoding: [0x64,0x0a,0xb6,0xee] - vrintxeq.f64 d28, d30 -@ CHECK: vrintxeq.f64 d28, d30 @ encoding: [0x6e,0xcb,0xf7,0x0e] - vrintxvs.f32 s10, s14 -@ CHECK: vrintxvs.f32 s10, s14 @ encoding: [0x47,0x5a,0xb7,0x6e] - -@ VRINT{A,N,P,M} - - vrinta.f64 d3, d4 -@ CHECK: vrinta.f64 d3, d4 @ encoding: [0x44,0x3b,0xb8,0xfe] - vrinta.f32 s12, s1 -@ CHECK: vrinta.f32 s12, s1 @ encoding: [0x60,0x6a,0xb8,0xfe] - vrintn.f64 d3, d4 -@ CHECK: vrintn.f64 d3, d4 @ encoding: [0x44,0x3b,0xb9,0xfe] - vrintn.f32 s12, s1 -@ CHECK: vrintn.f32 s12, s1 @ encoding: [0x60,0x6a,0xb9,0xfe] - vrintp.f64 d3, d4 -@ CHECK: vrintp.f64 d3, d4 @ encoding: [0x44,0x3b,0xba,0xfe] - vrintp.f32 s12, s1 -@ CHECK: vrintp.f32 s12, s1 @ encoding: [0x60,0x6a,0xba,0xfe] - vrintm.f64 d3, d4 -@ CHECK: vrintm.f64 d3, d4 @ encoding: [0x44,0x3b,0xbb,0xfe] - vrintm.f32 s12, s1 -@ CHECK: vrintm.f32 s12, s1 @ encoding: [0x60,0x6a,0xbb,0xfe] Index: test/MC/Disassembler/ARM/fp-armv8.txt =================================================================== --- /dev/null +++ test/MC/Disassembler/ARM/fp-armv8.txt @@ -0,0 +1,155 @@ +# RUN: llvm-mc -disassemble -triple armv8 -mattr=+fp-armv8 -show-encoding < %s | FileCheck %s + +0xe0 0x3b 0xb2 0xee +# CHECK: 
vcvtt.f64.f16 d3, s1 + +0xcc 0x2b 0xf3 0xee +# CHECK: vcvtt.f16.f64 s5, d12 + +0x60 0x3b 0xb2 0xee +# CHECK: vcvtb.f64.f16 d3, s1 + +0x41 0x2b 0xb3 0xee +# CHECK: vcvtb.f16.f64 s4, d1 + +0xe0 0x3b 0xb2 0xae +# CHECK: vcvttge.f64.f16 d3, s1 + +0xcc 0x2b 0xf3 0xce +# CHECK: vcvttgt.f16.f64 s5, d12 + +0x60 0x3b 0xb2 0x0e +# CHECK: vcvtbeq.f64.f16 d3, s1 + +0x41 0x2b 0xb3 0xbe +# CHECK: vcvtblt.f16.f64 s4, d1 + + +0xe1 0x1a 0xbc 0xfe +# CHECK: vcvta.s32.f32 s2, s3 + +0xc3 0x1b 0xbc 0xfe +# CHECK: vcvta.s32.f64 s2, d3 + +0xeb 0x3a 0xbd 0xfe +# CHECK: vcvtn.s32.f32 s6, s23 + +0xe7 0x3b 0xbd 0xfe +# CHECK: vcvtn.s32.f64 s6, d23 + +0xc2 0x0a 0xbe 0xfe +# CHECK: vcvtp.s32.f32 s0, s4 + +0xc4 0x0b 0xbe 0xfe +# CHECK: vcvtp.s32.f64 s0, d4 + +0xc4 0x8a 0xff 0xfe +# CHECK: vcvtm.s32.f32 s17, s8 + +0xc8 0x8b 0xff 0xfe +# CHECK: vcvtm.s32.f64 s17, d8 + +0x61 0x1a 0xbc 0xfe +# CHECK: vcvta.u32.f32 s2, s3 + +0x43 0x1b 0xbc 0xfe +# CHECK: vcvta.u32.f64 s2, d3 + +0x6b 0x3a 0xbd 0xfe +# CHECK: vcvtn.u32.f32 s6, s23 + +0x67 0x3b 0xbd 0xfe +# CHECK: vcvtn.u32.f64 s6, d23 + +0x42 0x0a 0xbe 0xfe +# CHECK: vcvtp.u32.f32 s0, s4 + +0x44 0x0b 0xbe 0xfe +# CHECK: vcvtp.u32.f64 s0, d4 + +0x44 0x8a 0xff 0xfe +# CHECK: vcvtm.u32.f32 s17, s8 + +0x48 0x8b 0xff 0xfe +# CHECK: vcvtm.u32.f64 s17, d8 + + +0xab 0x2a 0x20 0xfe +# CHECK: vselge.f32 s4, s1, s23 + +0xa7 0xeb 0x6f 0xfe +# CHECK: vselge.f64 d30, d31, d23 + +0x80 0x0a 0x30 0xfe +# CHECK: vselgt.f32 s0, s1, s0 + +0x24 0x5b 0x3a 0xfe +# CHECK: vselgt.f64 d5, d10, d20 + +0x2b 0xfa 0x0e 0xfe +# CHECK: vseleq.f32 s30, s28, s23 + +0x08 0x2b 0x04 0xfe +# CHECK: vseleq.f64 d2, d4, d8 + +0x07 0xaa 0x58 0xfe +# CHECK: vselvs.f32 s21, s16, s14 + +0x2f 0x0b 0x11 0xfe +# CHECK: vselvs.f64 d0, d1, d31 + + +0x00 0x2a 0xc6 0xfe +# CHECK: vmaxnm.f32 s5, s12, s0 + +0xae 0x5b 0x86 0xfe +# CHECK: vmaxnm.f64 d5, d22, d30 + +0x46 0x0a 0x80 0xfe +# CHECK: vminnm.f32 s0, s0, s12 + +0x49 0x4b 0x86 0xfe +# CHECK: vminnm.f64 d4, d6, d9 + + +0xcc 0x3b 0xb6 0xae +# CHECK: 
vrintzge.f64 d3, d12 + +0xcc 0x1a 0xf6 0xee +# CHECK: vrintz.f32 s3, s24 + +0x40 0x5b 0xb6 0xbe +# CHECK: vrintrlt.f64 d5, d0 + +0x64 0x0a 0xb6 0xee +# CHECK: vrintr.f32 s0, s9 + +0x6e 0xcb 0xf7 0x0e +# CHECK: vrintxeq.f64 d28, d30 + +0x47 0x5a 0xb7 0x6e +# CHECK: vrintxvs.f32 s10, s14 + +0x44 0x3b 0xb8 0xfe +# CHECK: vrinta.f64 d3, d4 + +0x60 0x6a 0xb8 0xfe +# CHECK: vrinta.f32 s12, s1 + +0x44 0x3b 0xb9 0xfe +# CHECK: vrintn.f64 d3, d4 + +0x60 0x6a 0xb9 0xfe +# CHECK: vrintn.f32 s12, s1 + +0x44 0x3b 0xba 0xfe +# CHECK: vrintp.f64 d3, d4 + +0x60 0x6a 0xba 0xfe +# CHECK: vrintp.f32 s12, s1 + +0x44 0x3b 0xbb 0xfe +# CHECK: vrintm.f64 d3, d4 + +0x60 0x6a 0xbb 0xfe +# CHECK: vrintm.f32 s12, s1 Index: test/MC/Disassembler/ARM/thumb-fp-armv8.txt =================================================================== --- /dev/null +++ test/MC/Disassembler/ARM/thumb-fp-armv8.txt @@ -0,0 +1,163 @@ +# RUN: llvm-mc -disassemble -triple thumbv8 -mattr=+fp-armv8 -show-encoding < %s | FileCheck %s + +0xb2 0xee 0xe0 0x3b +# CHECK: vcvtt.f64.f16 d3, s1 + +0xf3 0xee 0xcc 0x2b +# CHECK: vcvtt.f16.f64 s5, d12 + +0xb2 0xee 0x60 0x3b +# CHECK: vcvtb.f64.f16 d3, s1 + +0xb3 0xee 0x41 0x2b +# CHECK: vcvtb.f16.f64 s4, d1 + +0xa8 0xbf # IT block +0xb2 0xee 0xe0 0x3b +# CHECK: vcvttge.f64.f16 d3, s1 + +0xc8 0xbf # IT block +0xf3 0xee 0xcc 0x2b +# CHECK: vcvttgt.f16.f64 s5, d12 + +0x08 0xbf # IT block +0xb2 0xee 0x60 0x3b +# CHECK: vcvtbeq.f64.f16 d3, s1 + +0xb8 0xbf # IT block +0xb3 0xee 0x41 0x2b +# CHECK: vcvtblt.f16.f64 s4, d1 + + +0xbc 0xfe 0xe1 0x1a +# CHECK: vcvta.s32.f32 s2, s3 + +0xbc 0xfe 0xc3 0x1b +# CHECK: vcvta.s32.f64 s2, d3 + +0xbd 0xfe 0xeb 0x3a +# CHECK: vcvtn.s32.f32 s6, s23 + +0xbd 0xfe 0xe7 0x3b +# CHECK: vcvtn.s32.f64 s6, d23 + +0xbe 0xfe 0xc2 0x0a +# CHECK: vcvtp.s32.f32 s0, s4 + +0xbe 0xfe 0xc4 0x0b +# CHECK: vcvtp.s32.f64 s0, d4 + +0xff 0xfe 0xc4 0x8a +# CHECK: vcvtm.s32.f32 s17, s8 + +0xff 0xfe 0xc8 0x8b +# CHECK: vcvtm.s32.f64 s17, d8 + +0xbc 0xfe 0x61 0x1a +# CHECK: 
vcvta.u32.f32 s2, s3 + +0xbc 0xfe 0x43 0x1b +# CHECK: vcvta.u32.f64 s2, d3 + +0xbd 0xfe 0x6b 0x3a +# CHECK: vcvtn.u32.f32 s6, s23 + +0xbd 0xfe 0x67 0x3b +# CHECK: vcvtn.u32.f64 s6, d23 + +0xbe 0xfe 0x42 0x0a +# CHECK: vcvtp.u32.f32 s0, s4 + +0xbe 0xfe 0x44 0x0b +# CHECK: vcvtp.u32.f64 s0, d4 + +0xff 0xfe 0x44 0x8a +# CHECK: vcvtm.u32.f32 s17, s8 + +0xff 0xfe 0x48 0x8b +# CHECK: vcvtm.u32.f64 s17, d8 + + +0x20 0xfe 0xab 0x2a +# CHECK: vselge.f32 s4, s1, s23 + +0x6f 0xfe 0xa7 0xeb +# CHECK: vselge.f64 d30, d31, d23 + +0x30 0xfe 0x80 0x0a +# CHECK: vselgt.f32 s0, s1, s0 + +0x3a 0xfe 0x24 0x5b +# CHECK: vselgt.f64 d5, d10, d20 + +0x0e 0xfe 0x2b 0xfa +# CHECK: vseleq.f32 s30, s28, s23 + +0x04 0xfe 0x08 0x2b +# CHECK: vseleq.f64 d2, d4, d8 + +0x58 0xfe 0x07 0xaa +# CHECK: vselvs.f32 s21, s16, s14 + +0x11 0xfe 0x2f 0x0b +# CHECK: vselvs.f64 d0, d1, d31 + + +0xc6 0xfe 0x00 0x2a +# CHECK: vmaxnm.f32 s5, s12, s0 + +0x86 0xfe 0xae 0x5b +# CHECK: vmaxnm.f64 d5, d22, d30 + +0x80 0xfe 0x46 0x0a +# CHECK: vminnm.f32 s0, s0, s12 + +0x86 0xfe 0x49 0x4b +# CHECK: vminnm.f64 d4, d6, d9 + + +0xa8 0xbf # IT block +0xb6 0xee 0xcc 0x3b +# CHECK: vrintzge.f64 d3, d12 + +0xf6 0xee 0xcc 0x1a +# CHECK: vrintz.f32 s3, s24 + +0xb8 0xbf # IT block +0xb6 0xee 0x40 0x5b +# CHECK: vrintrlt.f64 d5, d0 + +0xb6 0xee 0x64 0x0a +# CHECK: vrintr.f32 s0, s9 + +0x08 0xbf # IT block +0xf7 0xee 0x6e 0xcb +# CHECK: vrintxeq.f64 d28, d30 + +0x68 0xbf # IT block +0xb7 0xee 0x47 0x5a +# CHECK: vrintxvs.f32 s10, s14 + +0xb8 0xfe 0x44 0x3b +# CHECK: vrinta.f64 d3, d4 + +0xb8 0xfe 0x60 0x6a +# CHECK: vrinta.f32 s12, s1 + +0xb9 0xfe 0x44 0x3b +# CHECK: vrintn.f64 d3, d4 + +0xb9 0xfe 0x60 0x6a +# CHECK: vrintn.f32 s12, s1 + +0xba 0xfe 0x44 0x3b +# CHECK: vrintp.f64 d3, d4 + +0xba 0xfe 0x60 0x6a +# CHECK: vrintp.f32 s12, s1 + +0xbb 0xfe 0x44 0x3b +# CHECK: vrintm.f64 d3, d4 + +0xbb 0xfe 0x60 0x6a +# CHECK: vrintm.f32 s12, s1 Index: test/MC/Disassembler/ARM/thumb-v8fp.txt 
=================================================================== --- test/MC/Disassembler/ARM/thumb-v8fp.txt +++ /dev/null @@ -1,163 +0,0 @@ -# RUN: llvm-mc -disassemble -triple thumbv8 -mattr=+v8fp -show-encoding < %s | FileCheck %s - -0xb2 0xee 0xe0 0x3b -# CHECK: vcvtt.f64.f16 d3, s1 - -0xf3 0xee 0xcc 0x2b -# CHECK: vcvtt.f16.f64 s5, d12 - -0xb2 0xee 0x60 0x3b -# CHECK: vcvtb.f64.f16 d3, s1 - -0xb3 0xee 0x41 0x2b -# CHECK: vcvtb.f16.f64 s4, d1 - -0xa8 0xbf # IT block -0xb2 0xee 0xe0 0x3b -# CHECK: vcvttge.f64.f16 d3, s1 - -0xc8 0xbf # IT block -0xf3 0xee 0xcc 0x2b -# CHECK: vcvttgt.f16.f64 s5, d12 - -0x08 0xbf # IT block -0xb2 0xee 0x60 0x3b -# CHECK: vcvtbeq.f64.f16 d3, s1 - -0xb8 0xbf # IT block -0xb3 0xee 0x41 0x2b -# CHECK: vcvtblt.f16.f64 s4, d1 - - -0xbc 0xfe 0xe1 0x1a -# CHECK: vcvta.s32.f32 s2, s3 - -0xbc 0xfe 0xc3 0x1b -# CHECK: vcvta.s32.f64 s2, d3 - -0xbd 0xfe 0xeb 0x3a -# CHECK: vcvtn.s32.f32 s6, s23 - -0xbd 0xfe 0xe7 0x3b -# CHECK: vcvtn.s32.f64 s6, d23 - -0xbe 0xfe 0xc2 0x0a -# CHECK: vcvtp.s32.f32 s0, s4 - -0xbe 0xfe 0xc4 0x0b -# CHECK: vcvtp.s32.f64 s0, d4 - -0xff 0xfe 0xc4 0x8a -# CHECK: vcvtm.s32.f32 s17, s8 - -0xff 0xfe 0xc8 0x8b -# CHECK: vcvtm.s32.f64 s17, d8 - -0xbc 0xfe 0x61 0x1a -# CHECK: vcvta.u32.f32 s2, s3 - -0xbc 0xfe 0x43 0x1b -# CHECK: vcvta.u32.f64 s2, d3 - -0xbd 0xfe 0x6b 0x3a -# CHECK: vcvtn.u32.f32 s6, s23 - -0xbd 0xfe 0x67 0x3b -# CHECK: vcvtn.u32.f64 s6, d23 - -0xbe 0xfe 0x42 0x0a -# CHECK: vcvtp.u32.f32 s0, s4 - -0xbe 0xfe 0x44 0x0b -# CHECK: vcvtp.u32.f64 s0, d4 - -0xff 0xfe 0x44 0x8a -# CHECK: vcvtm.u32.f32 s17, s8 - -0xff 0xfe 0x48 0x8b -# CHECK: vcvtm.u32.f64 s17, d8 - - -0x20 0xfe 0xab 0x2a -# CHECK: vselge.f32 s4, s1, s23 - -0x6f 0xfe 0xa7 0xeb -# CHECK: vselge.f64 d30, d31, d23 - -0x30 0xfe 0x80 0x0a -# CHECK: vselgt.f32 s0, s1, s0 - -0x3a 0xfe 0x24 0x5b -# CHECK: vselgt.f64 d5, d10, d20 - -0x0e 0xfe 0x2b 0xfa -# CHECK: vseleq.f32 s30, s28, s23 - -0x04 0xfe 0x08 0x2b -# CHECK: vseleq.f64 d2, d4, d8 - -0x58 0xfe 0x07 
0xaa -# CHECK: vselvs.f32 s21, s16, s14 - -0x11 0xfe 0x2f 0x0b -# CHECK: vselvs.f64 d0, d1, d31 - - -0xc6 0xfe 0x00 0x2a -# CHECK: vmaxnm.f32 s5, s12, s0 - -0x86 0xfe 0xae 0x5b -# CHECK: vmaxnm.f64 d5, d22, d30 - -0x80 0xfe 0x46 0x0a -# CHECK: vminnm.f32 s0, s0, s12 - -0x86 0xfe 0x49 0x4b -# CHECK: vminnm.f64 d4, d6, d9 - - -0xa8 0xbf # IT block -0xb6 0xee 0xcc 0x3b -# CHECK: vrintzge.f64 d3, d12 - -0xf6 0xee 0xcc 0x1a -# CHECK: vrintz.f32 s3, s24 - -0xb8 0xbf # IT block -0xb6 0xee 0x40 0x5b -# CHECK: vrintrlt.f64 d5, d0 - -0xb6 0xee 0x64 0x0a -# CHECK: vrintr.f32 s0, s9 - -0x08 0xbf # IT block -0xf7 0xee 0x6e 0xcb -# CHECK: vrintxeq.f64 d28, d30 - -0x68 0xbf # IT block -0xb7 0xee 0x47 0x5a -# CHECK: vrintxvs.f32 s10, s14 - -0xb8 0xfe 0x44 0x3b -# CHECK: vrinta.f64 d3, d4 - -0xb8 0xfe 0x60 0x6a -# CHECK: vrinta.f32 s12, s1 - -0xb9 0xfe 0x44 0x3b -# CHECK: vrintn.f64 d3, d4 - -0xb9 0xfe 0x60 0x6a -# CHECK: vrintn.f32 s12, s1 - -0xba 0xfe 0x44 0x3b -# CHECK: vrintp.f64 d3, d4 - -0xba 0xfe 0x60 0x6a -# CHECK: vrintp.f32 s12, s1 - -0xbb 0xfe 0x44 0x3b -# CHECK: vrintm.f64 d3, d4 - -0xbb 0xfe 0x60 0x6a -# CHECK: vrintm.f32 s12, s1 Index: test/MC/Disassembler/ARM/v8fp.txt =================================================================== --- test/MC/Disassembler/ARM/v8fp.txt +++ /dev/null @@ -1,155 +0,0 @@ -# RUN: llvm-mc -disassemble -triple armv8 -mattr=+v8fp -show-encoding < %s | FileCheck %s - -0xe0 0x3b 0xb2 0xee -# CHECK: vcvtt.f64.f16 d3, s1 - -0xcc 0x2b 0xf3 0xee -# CHECK: vcvtt.f16.f64 s5, d12 - -0x60 0x3b 0xb2 0xee -# CHECK: vcvtb.f64.f16 d3, s1 - -0x41 0x2b 0xb3 0xee -# CHECK: vcvtb.f16.f64 s4, d1 - -0xe0 0x3b 0xb2 0xae -# CHECK: vcvttge.f64.f16 d3, s1 - -0xcc 0x2b 0xf3 0xce -# CHECK: vcvttgt.f16.f64 s5, d12 - -0x60 0x3b 0xb2 0x0e -# CHECK: vcvtbeq.f64.f16 d3, s1 - -0x41 0x2b 0xb3 0xbe -# CHECK: vcvtblt.f16.f64 s4, d1 - - -0xe1 0x1a 0xbc 0xfe -# CHECK: vcvta.s32.f32 s2, s3 - -0xc3 0x1b 0xbc 0xfe -# CHECK: vcvta.s32.f64 s2, d3 - -0xeb 0x3a 0xbd 0xfe -# CHECK: 
vcvtn.s32.f32 s6, s23 - -0xe7 0x3b 0xbd 0xfe -# CHECK: vcvtn.s32.f64 s6, d23 - -0xc2 0x0a 0xbe 0xfe -# CHECK: vcvtp.s32.f32 s0, s4 - -0xc4 0x0b 0xbe 0xfe -# CHECK: vcvtp.s32.f64 s0, d4 - -0xc4 0x8a 0xff 0xfe -# CHECK: vcvtm.s32.f32 s17, s8 - -0xc8 0x8b 0xff 0xfe -# CHECK: vcvtm.s32.f64 s17, d8 - -0x61 0x1a 0xbc 0xfe -# CHECK: vcvta.u32.f32 s2, s3 - -0x43 0x1b 0xbc 0xfe -# CHECK: vcvta.u32.f64 s2, d3 - -0x6b 0x3a 0xbd 0xfe -# CHECK: vcvtn.u32.f32 s6, s23 - -0x67 0x3b 0xbd 0xfe -# CHECK: vcvtn.u32.f64 s6, d23 - -0x42 0x0a 0xbe 0xfe -# CHECK: vcvtp.u32.f32 s0, s4 - -0x44 0x0b 0xbe 0xfe -# CHECK: vcvtp.u32.f64 s0, d4 - -0x44 0x8a 0xff 0xfe -# CHECK: vcvtm.u32.f32 s17, s8 - -0x48 0x8b 0xff 0xfe -# CHECK: vcvtm.u32.f64 s17, d8 - - -0xab 0x2a 0x20 0xfe -# CHECK: vselge.f32 s4, s1, s23 - -0xa7 0xeb 0x6f 0xfe -# CHECK: vselge.f64 d30, d31, d23 - -0x80 0x0a 0x30 0xfe -# CHECK: vselgt.f32 s0, s1, s0 - -0x24 0x5b 0x3a 0xfe -# CHECK: vselgt.f64 d5, d10, d20 - -0x2b 0xfa 0x0e 0xfe -# CHECK: vseleq.f32 s30, s28, s23 - -0x08 0x2b 0x04 0xfe -# CHECK: vseleq.f64 d2, d4, d8 - -0x07 0xaa 0x58 0xfe -# CHECK: vselvs.f32 s21, s16, s14 - -0x2f 0x0b 0x11 0xfe -# CHECK: vselvs.f64 d0, d1, d31 - - -0x00 0x2a 0xc6 0xfe -# CHECK: vmaxnm.f32 s5, s12, s0 - -0xae 0x5b 0x86 0xfe -# CHECK: vmaxnm.f64 d5, d22, d30 - -0x46 0x0a 0x80 0xfe -# CHECK: vminnm.f32 s0, s0, s12 - -0x49 0x4b 0x86 0xfe -# CHECK: vminnm.f64 d4, d6, d9 - - -0xcc 0x3b 0xb6 0xae -# CHECK: vrintzge.f64 d3, d12 - -0xcc 0x1a 0xf6 0xee -# CHECK: vrintz.f32 s3, s24 - -0x40 0x5b 0xb6 0xbe -# CHECK: vrintrlt.f64 d5, d0 - -0x64 0x0a 0xb6 0xee -# CHECK: vrintr.f32 s0, s9 - -0x6e 0xcb 0xf7 0x0e -# CHECK: vrintxeq.f64 d28, d30 - -0x47 0x5a 0xb7 0x6e -# CHECK: vrintxvs.f32 s10, s14 - -0x44 0x3b 0xb8 0xfe -# CHECK: vrinta.f64 d3, d4 - -0x60 0x6a 0xb8 0xfe -# CHECK: vrinta.f32 s12, s1 - -0x44 0x3b 0xb9 0xfe -# CHECK: vrintn.f64 d3, d4 - -0x60 0x6a 0xb9 0xfe -# CHECK: vrintn.f32 s12, s1 - -0x44 0x3b 0xba 0xfe -# CHECK: vrintp.f64 d3, d4 - -0x60 
0x6a 0xba 0xfe -# CHECK: vrintp.f32 s12, s1 - -0x44 0x3b 0xbb 0xfe -# CHECK: vrintm.f64 d3, d4 - -0x60 0x6a 0xbb 0xfe -# CHECK: vrintm.f32 s12, s1