Index: include/llvm/IR/IntrinsicsX86.td =================================================================== --- include/llvm/IR/IntrinsicsX86.td +++ include/llvm/IR/IntrinsicsX86.td @@ -1341,6 +1341,92 @@ } +// GFNI Instructions +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + // AVX512 + def int_x86_avx512_mask_vgf2p8affineinvqb_128 : + GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v16qi_mask">, + Intrinsic<[llvm_v16i8_ty], + [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i16_ty], + [IntrNoMem]>; + def int_x86_avx512_mask_vgf2p8affineinvqb_256 : + GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v32qi_mask">, + Intrinsic<[llvm_v32i8_ty], + [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty, llvm_v32i8_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_avx512_mask_vgf2p8affineinvqb_512 : + GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v64qi_mask">, + Intrinsic<[llvm_v64i8_ty], + [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty, llvm_v64i8_ty, llvm_i64_ty], + [IntrNoMem]>; + + def int_x86_avx512_mask_vgf2p8affineqb_128 : + GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v16qi_mask">, + Intrinsic<[llvm_v16i8_ty], + [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i16_ty], + [IntrNoMem]>; + def int_x86_avx512_mask_vgf2p8affineqb_256 : + GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v32qi_mask">, + Intrinsic<[llvm_v32i8_ty], + [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty, llvm_v32i8_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_avx512_mask_vgf2p8affineqb_512 : + GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v64qi_mask">, + Intrinsic<[llvm_v64i8_ty], + [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty, llvm_v64i8_ty, llvm_i64_ty], + [IntrNoMem]>; + + def int_x86_avx512_mask_vgf2p8mulb_128 : + GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v16qi_mask">, + Intrinsic<[llvm_v16i8_ty], + [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty], + [IntrNoMem]>; + def int_x86_avx512_mask_vgf2p8mulb_256 : + GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v32qi_mask">, + 
Intrinsic<[llvm_v32i8_ty], + [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_avx512_mask_vgf2p8mulb_512 : + GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v64qi_mask">, + Intrinsic<[llvm_v64i8_ty], + [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty], + [IntrNoMem]>; + + // SSE, AVX + def int_x86_gf2p8affineinvqb : + GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v16qi">, + Intrinsic<[llvm_v16i8_ty], + [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_vgf2p8affineinvqb : + GCCBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v32qi">, + Intrinsic<[llvm_v32i8_ty], + [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty], + [IntrNoMem]>; + + def int_x86_gf2p8affineqb : + GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v16qi">, + Intrinsic<[llvm_v16i8_ty], + [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_vgf2p8affineqb : + GCCBuiltin<"__builtin_ia32_vgf2p8affineqb_v32qi">, + Intrinsic<[llvm_v32i8_ty], + [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty], + [IntrNoMem]>; + + def int_x86_gf2p8mulb : + GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v16qi">, + Intrinsic<[llvm_v16i8_ty], + [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_vgf2p8mulb : + GCCBuiltin<"__builtin_ia32_vgf2p8mulb_v32qi">, + Intrinsic<[llvm_v32i8_ty], + [llvm_v32i8_ty, llvm_v32i8_ty], + [IntrNoMem]>; +} + // Vector blend let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
def int_x86_avx_blendv_pd_256 : GCCBuiltin<"__builtin_ia32_blendvpd256">, Index: lib/Support/Host.cpp =================================================================== --- lib/Support/Host.cpp +++ lib/Support/Host.cpp @@ -1217,12 +1217,14 @@ Features["avx512vbmi"] = HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save; Features["pku"] = HasLeaf7 && ((ECX >> 4) & 1); Features["avx512vbmi2"] = HasLeaf7 && ((ECX >> 6) & 1) && HasAVX512Save; + Features["gfni"] = HasLeaf7 && ((ECX >> 8) & 1); Features["vaes"] = HasLeaf7 && ((ECX >> 9) & 1) && HasAVXSave; Features["vpclmulqdq"] = HasLeaf7 && ((ECX >> 10) & 1) && HasAVXSave; Features["avx512vnni"] = HasLeaf7 && ((ECX >> 11) & 1) && HasAVX512Save; Features["avx512bitalg"] = HasLeaf7 && ((ECX >> 12) & 1) && HasAVX512Save; Features["avx512vpopcntdq"] = HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save; + bool HasLeafD = MaxLevel >= 0xd && !getX86CpuIDAndInfoEx(0xd, 0x1, &EAX, &EBX, &ECX, &EDX); Index: lib/Target/X86/X86.td =================================================================== --- lib/Target/X86/X86.td +++ lib/Target/X86/X86.td @@ -169,6 +169,9 @@ def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true", "Enable packed carry-less multiplication instructions", [FeatureSSE2]>; +def FeatureGFNI : SubtargetFeature<"gfni", "HasGFNI", "true", + "Enable Galois Field Arithmetic Instructions", + [FeatureSSE2]>; def FeatureVPCLMULQDQ : SubtargetFeature<"vpclmulqdq", "HasVPCLMULQDQ", "true", "Enable vpclmulqdq instructions", [FeatureAVX, FeaturePCLMUL]>; Index: lib/Target/X86/X86ISelLowering.h =================================================================== --- lib/Target/X86/X86ISelLowering.h +++ lib/Target/X86/X86ISelLowering.h @@ -579,6 +579,9 @@ // Conversions between float and half-float. CVTPS2PH, CVTPH2PS, CVTPH2PS_RND, + // Galois Field Arithmetic Instructions + GF2P8AFFINEINVQB, GF2P8AFFINEQB, GF2P8MULB, + // LWP insert record.
LWPINS, Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -25269,6 +25269,9 @@ case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS"; case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD"; case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS"; + case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB"; + case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB"; + case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB"; } return nullptr; } Index: lib/Target/X86/X86InstrAVX512.td =================================================================== --- lib/Target/X86/X86InstrAVX512.td +++ lib/Target/X86/X86InstrAVX512.td @@ -10215,3 +10215,55 @@ avx512vl_i16_info, HasBITALG>, avx512_unary_lowering, VEX_W; +//===----------------------------------------------------------------------===// +// GFNI +//===----------------------------------------------------------------------===// + +multiclass GF2P8MULB_avx512_common Op, string OpStr, SDNode OpNode> { + let Predicates = [HasGFNI, HasAVX512, HasBWI] in + defm Z : avx512_binop_rm, EVEX_V512; + let Predicates = [HasGFNI, HasVLX, HasBWI] in { + defm Z256 : avx512_binop_rm, EVEX_V256; + defm Z128 : avx512_binop_rm, EVEX_V128; + } +} + +defm GF2P8MULB : GF2P8MULB_avx512_common<0xCF, "vgf2p8mulb", X86GF2P8mulb>, + EVEX_CD8<8, CD8VF>, T8PD; + +multiclass GF2P8AFFINE_avx512_rmb_imm Op, string OpStr, SDNode OpNode, + X86VectorVTInfo VTI, + X86VectorVTInfo BcstVTI> + : avx512_3Op_rm_imm8 { + let ExeDomain = VTI.ExeDomain in + defm rmbi : AVX512_maskable, EVEX_B; +} + +multiclass GF2P8AFFINE_avx512_common Op, string OpStr, SDNode OpNode> { + let Predicates = [HasGFNI, HasAVX512, HasBWI] in + defm Z : GF2P8AFFINE_avx512_rmb_imm, EVEX_V512; + let Predicates = [HasGFNI, HasVLX, HasBWI] in { + defm Z256 : GF2P8AFFINE_avx512_rmb_imm, EVEX_V256; + defm Z128 : GF2P8AFFINE_avx512_rmb_imm, EVEX_V128; + } +} + 
+defm GF2P8AFFINEINVQB : GF2P8AFFINE_avx512_common<0xCF, "vgf2p8affineinvqb", + X86GF2P8affineinvqb>, + EVEX_4V, EVEX_CD8<8, CD8VF>, VEX_W, AVX512AIi8Base; +defm GF2P8AFFINEQB : GF2P8AFFINE_avx512_common<0xCE, "vgf2p8affineqb", + X86GF2P8affineqb>, + EVEX_4V, EVEX_CD8<8, CD8VF>, VEX_W, AVX512AIi8Base; + Index: lib/Target/X86/X86InstrFragmentsSIMD.td =================================================================== --- lib/Target/X86/X86InstrFragmentsSIMD.td +++ lib/Target/X86/X86InstrFragmentsSIMD.td @@ -656,6 +656,11 @@ def X86cvt2mask : SDNode<"X86ISD::CVT2MASK", SDTIntTruncOp>; +// Galois field arithmetic +def X86GF2P8affineinvqb : SDNode<"X86ISD::GF2P8AFFINEINVQB", SDTBlend>; +def X86GF2P8affineqb : SDNode<"X86ISD::GF2P8AFFINEQB", SDTBlend>; +def X86GF2P8mulb : SDNode<"X86ISD::GF2P8MULB", SDTIntBinOp>; + //===----------------------------------------------------------------------===// // SSE Complex Patterns //===----------------------------------------------------------------------===// Index: lib/Target/X86/X86InstrInfo.td =================================================================== --- lib/Target/X86/X86InstrInfo.td +++ lib/Target/X86/X86InstrInfo.td @@ -848,6 +848,7 @@ def NoVLX_Or_NoVPCLMULQDQ : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">; def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">; +def HasGFNI : Predicate<"Subtarget->hasGFNI()">; def HasFMA : Predicate<"Subtarget->hasFMA()">; def HasFMA4 : Predicate<"Subtarget->hasFMA4()">; def HasXOP : Predicate<"Subtarget->hasXOP()">; Index: lib/Target/X86/X86InstrSSE.td =================================================================== --- lib/Target/X86/X86InstrSSE.td +++ lib/Target/X86/X86InstrSSE.td @@ -8468,3 +8468,82 @@ (COPY_TO_REGCLASS (XORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128), (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>; + +//===----------------------------------------------------------------------===// +// GFNI instructions
+//===----------------------------------------------------------------------===// + +multiclass GF2P8MULB_rm { + let ExeDomain = SSEPackedInt, + AsmString = !if(Is2Addr, + OpcodeStr##"\t{$src2, $dst|$dst, $src2}", + OpcodeStr##"\t{$src2, $src1, $dst|$dst, $src1, $src2}") in { + let isCommutable = 1 in + def rr : PDI<0xCF, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2), "", + [(set RC:$dst, (IntId RC:$src1, RC:$src2))], + SSE_INTALU_ITINS_P.rr>, + Sched<[SSE_INTALU_ITINS_P.Sched]>, T8PD; + + def rm : PDI<0xCF, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, X86MemOp:$src2), "", + [(set RC:$dst, (IntId RC:$src1, + (bitconvert (MemOpFrag addr:$src2))))], + SSE_INTALU_ITINS_P.rm>, + Sched<[SSE_INTALU_ITINS_P.Sched.Folded, ReadAfterLd]>, T8PD; + } +} + +multiclass GF2P8AFFINE_rmi Op, string OpStr, Intrinsic IntId, + RegisterClass RC, PatFrag MemOpFrag, + X86MemOperand X86MemOp, bit Is2Addr = 0> { + let AsmString = !if(Is2Addr, + OpStr##"\t{$src3, $src2, $dst|$dst, $src2, $src3}", + OpStr##"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}") in { + def rri : Ii8, + Sched<[WriteVecALU]>; + def rmi : Ii8, + Sched<[WriteVecALU.Folded, ReadAfterLd]>; + } +} + +multiclass GF2P8AFFINE_common Op, string OpStr> { + let Constraints = "$src1 = $dst", + Predicates = [HasGFNI, UseSSE2] in + defm NAME : GF2P8AFFINE_rmi("int_x86_"##OpStr), + VR128, loadv2i64, i128mem, 1>; + let Predicates = [HasGFNI, HasAVX, NoVLX] in { + defm V##NAME : GF2P8AFFINE_rmi("int_x86_"##OpStr), + VR128, loadv2i64, i128mem>, VEX_4V, VEX_W; + defm V##NAME##Y : GF2P8AFFINE_rmi("int_x86_v"##OpStr), VR256, + loadv4i64, i256mem>, VEX_4V, VEX_L, VEX_W; + } +} + +// GF2P8MULB +let Constraints = "$src1 = $dst", + Predicates = [HasGFNI, UseSSE2] in +defm GF2P8MULB : GF2P8MULB_rm<"gf2p8mulb", int_x86_gf2p8mulb, VR128, + memopv2i64, i128mem, 1>; +let Predicates = [HasGFNI, HasAVX, NoVLX] in { + defm VGF2P8MULB : GF2P8MULB_rm<"vgf2p8mulb", int_x86_gf2p8mulb, VR128, + loadv2i64, i128mem>, VEX_4V; + defm VGF2P8MULBY 
: GF2P8MULB_rm<"vgf2p8mulb", int_x86_vgf2p8mulb, VR256, + loadv4i64, i256mem>, VEX_4V, VEX_L; +} +// GF2P8AFFINEINVQB, GF2P8AFFINEQB +let isCommutable = 0 in { + defm GF2P8AFFINEINVQB : GF2P8AFFINE_common<0xCF, "gf2p8affineinvqb">, TAPD; + defm GF2P8AFFINEQB : GF2P8AFFINE_common<0xCE, "gf2p8affineqb">, TAPD; +} + Index: lib/Target/X86/X86IntrinsicsInfo.h =================================================================== --- lib/Target/X86/X86IntrinsicsInfo.h +++ lib/Target/X86/X86IntrinsicsInfo.h @@ -1157,6 +1157,25 @@ X86_INTRINSIC_DATA(avx512_mask_vfnmsub_ps_512, FMA_OP_MASK, X86ISD::FNMSUB, X86ISD::FNMSUB_RND), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8affineinvqb_128, INTR_TYPE_3OP_IMM8_MASK, + X86ISD::GF2P8AFFINEINVQB, 0), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8affineinvqb_256, INTR_TYPE_3OP_IMM8_MASK, + X86ISD::GF2P8AFFINEINVQB, 0), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8affineinvqb_512, INTR_TYPE_3OP_IMM8_MASK, + X86ISD::GF2P8AFFINEINVQB, 0), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8affineqb_128, INTR_TYPE_3OP_IMM8_MASK, + X86ISD::GF2P8AFFINEQB, 0), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8affineqb_256, INTR_TYPE_3OP_IMM8_MASK, + X86ISD::GF2P8AFFINEQB, 0), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8affineqb_512, INTR_TYPE_3OP_IMM8_MASK, + X86ISD::GF2P8AFFINEQB, 0), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8mulb_128, INTR_TYPE_2OP_MASK, + X86ISD::GF2P8MULB, 0), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8mulb_256, INTR_TYPE_2OP_MASK, + X86ISD::GF2P8MULB, 0), + X86_INTRINSIC_DATA(avx512_mask_vgf2p8mulb_512, INTR_TYPE_2OP_MASK, + X86ISD::GF2P8MULB, 0), + X86_INTRINSIC_DATA(avx512_mask_vpdpbusd_128, FMA_OP_MASK, X86ISD::VPDPBUSD, 0), X86_INTRINSIC_DATA(avx512_mask_vpdpbusd_256, FMA_OP_MASK, X86ISD::VPDPBUSD, 0), X86_INTRINSIC_DATA(avx512_mask_vpdpbusd_512, FMA_OP_MASK, X86ISD::VPDPBUSD, 0), @@ -1170,7 +1189,7 @@ X86_INTRINSIC_DATA(avx512_mask_vpdpwssds_256, FMA_OP_MASK, X86ISD::VPDPWSSDS, 0), X86_INTRINSIC_DATA(avx512_mask_vpdpwssds_512, FMA_OP_MASK, X86ISD::VPDPWSSDS, 0), - 
X86_INTRINSIC_DATA(avx512_mask_vpermi2var_d_128, VPERM_3OP_MASK, + X86_INTRINSIC_DATA(avx512_mask_vpermi2var_d_128, VPERM_3OP_MASK, X86ISD::VPERMIV3, 0), X86_INTRINSIC_DATA(avx512_mask_vpermi2var_d_256, VPERM_3OP_MASK, X86ISD::VPERMIV3, 0), Index: lib/Target/X86/X86Subtarget.h =================================================================== --- lib/Target/X86/X86Subtarget.h +++ lib/Target/X86/X86Subtarget.h @@ -128,6 +128,9 @@ bool HasPCLMUL; bool HasVPCLMULQDQ; + /// Target has Galois Field Arithmetic instructions + bool HasGFNI; + /// Target has 3-operand fused multiply-add bool HasFMA; @@ -476,6 +479,7 @@ bool hasXSAVES() const { return HasXSAVES; } bool hasPCLMUL() const { return HasPCLMUL; } bool hasVPCLMULQDQ() const { return HasVPCLMULQDQ; } + bool hasGFNI() const { return HasGFNI; } // Prefer FMA4 to FMA - its better for commutation/memory folding and // has equal or better performance on all supported targets. bool hasFMA() const { return HasFMA && !HasFMA4; } Index: lib/Target/X86/X86Subtarget.cpp =================================================================== --- lib/Target/X86/X86Subtarget.cpp +++ lib/Target/X86/X86Subtarget.cpp @@ -300,6 +300,7 @@ HasXSAVES = false; HasPCLMUL = false; HasVPCLMULQDQ = false; + HasGFNI = false; HasFMA = false; HasFMA4 = false; HasXOP = false; Index: test/CodeGen/X86/avx-gfni-intrinsics.ll =================================================================== --- test/CodeGen/X86/avx-gfni-intrinsics.ll +++ test/CodeGen/X86/avx-gfni-intrinsics.ll @@ -0,0 +1,63 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+gfni,+avx -show-mc-encoding | FileCheck %s + +declare <16 x i8> @llvm.x86.gf2p8affineinvqb(<16 x i8>, <16 x i8>, i8) +define <16 x i8> @test_gf2p8affineinvqb(<16 x i8> %src1, <16 x i8> %src2) { +; CHECK-LABEL: test_gf2p8affineinvqb: +; CHECK: ## BB#0: +; CHECK-NEXT: vgf2p8affineinvqb $11, %xmm1, %xmm0, %xmm0 ## encoding:
[0xc4,0xe3,0xf9,0xcf,0xc1,0x0b] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.gf2p8affineinvqb(<16 x i8> %src1, <16 x i8> %src2, i8 11) + ret <16 x i8> %1 +} + +declare <32 x i8> @llvm.x86.vgf2p8affineinvqb(<32 x i8>, <32 x i8>, i8) +define <32 x i8> @test_vgf2p8affineinvqb(<32 x i8> %src1, <32 x i8> %src2) { +; CHECK-LABEL: test_vgf2p8affineinvqb: +; CHECK: ## BB#0: +; CHECK-NEXT: vgf2p8affineinvqb $11, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0xfd,0xcf,0xc1,0x0b] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <32 x i8> @llvm.x86.vgf2p8affineinvqb(<32 x i8> %src1, <32 x i8> %src2, i8 11) + ret <32 x i8> %1 +} + +declare <16 x i8> @llvm.x86.gf2p8affineqb(<16 x i8>, <16 x i8>, i8) +define <16 x i8> @test_gf2p8affineqb(<16 x i8> %src1, <16 x i8> %src2) { +; CHECK-LABEL: test_gf2p8affineqb: +; CHECK: ## BB#0: +; CHECK-NEXT: vgf2p8affineqb $11, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0xf9,0xce,0xc1,0x0b] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.gf2p8affineqb(<16 x i8> %src1, <16 x i8> %src2, i8 11) + ret <16 x i8> %1 +} + +declare <32 x i8> @llvm.x86.vgf2p8affineqb(<32 x i8>, <32 x i8>, i8) +define <32 x i8> @test_vgf2p8affineqb(<32 x i8> %src1, <32 x i8> %src2) { +; CHECK-LABEL: test_vgf2p8affineqb: +; CHECK: ## BB#0: +; CHECK-NEXT: vgf2p8affineqb $11, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0xfd,0xce,0xc1,0x0b] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <32 x i8> @llvm.x86.vgf2p8affineqb(<32 x i8> %src1, <32 x i8> %src2, i8 11) + ret <32 x i8> %1 +} + +declare <16 x i8> @llvm.x86.gf2p8mulb(<16 x i8>, <16 x i8>) +define <16 x i8> @test_gf2p8mulb(<16 x i8> %src1, <16 x i8> %src2) { +; CHECK-LABEL: test_gf2p8mulb: +; CHECK: ## BB#0: +; CHECK-NEXT: vgf2p8mulb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xcf,0xc1] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.gf2p8mulb(<16 x i8> %src1, <16 x i8> %src2) + ret <16 x i8> %1 +} + +declare <32 x i8> 
@llvm.x86.vgf2p8mulb(<32 x i8>, <32 x i8>) +define <32 x i8> @test_vgf2p8mulb(<32 x i8> %src1, <32 x i8> %src2) { +; CHECK-LABEL: test_vgf2p8mulb: +; CHECK: ## BB#0: +; CHECK-NEXT: vgf2p8mulb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0xcf,0xc1] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <32 x i8> @llvm.x86.vgf2p8mulb(<32 x i8> %src1, <32 x i8> %src2) + ret <32 x i8> %1 +} + Index: test/CodeGen/X86/avx512-gfni-intrinsics.ll =================================================================== --- test/CodeGen/X86/avx512-gfni-intrinsics.ll +++ test/CodeGen/X86/avx512-gfni-intrinsics.ll @@ -0,0 +1,138 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512vl,+gfni,+avx512bw --show-mc-encoding | FileCheck %s + +declare <16 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i32, <16 x i8>, i16) +define <16 x i8> @test_gf2p8affineinvqb_mask_128(<16 x i8> %passthru, <16 x i8> %src1, <16 x i8> %src2, i16 %mask) { +; CHECK-LABEL: test_gf2p8affineinvqb_mask_128: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8affineinvqb $3, %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0xcf,0xc2,0x03] +; CHECK-NEXT: vgf2p8affineinvqb $3, %xmm2, %xmm1, %xmm1 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0x89,0xcf,0xca,0x03] +; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i32 3, <16 x i8> %passthru, i16 %mask) + %2 = call <16 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i32 3, <16 x i8> zeroinitializer, i16 %mask) + %3 = xor <16 x i8> %1, %2 + ret <16 x i8> %3 +} + +declare <32 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i32, <32 x i8>, i32) +define <32 x 
i8> @test_gf2p8affineinvqb_mask_256(<32 x i8> %passthru, <32 x i8> %src1, <32 x i8> %src2, i32 %mask) { +; CHECK-LABEL: test_gf2p8affineinvqb_mask_256: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8affineinvqb $3, %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0xcf,0xc2,0x03] +; CHECK-NEXT: vgf2p8affineinvqb $3, %ymm2, %ymm1, %ymm1 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0xcf,0xca,0x03] +; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <32 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.256(<32 x i8> %src1, <32 x i8> %src2, i32 3, <32 x i8> %passthru, i32 %mask) + %2 = call <32 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.256(<32 x i8> %src1, <32 x i8> %src2, i32 3, <32 x i8> zeroinitializer, i32 %mask) + %3 = xor <32 x i8> %1, %2 + ret <32 x i8> %3 +} + +declare <64 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.512(<64 x i8>, <64 x i8>, i32, <64 x i8>, i64) +define <64 x i8> @test_gf2p8affineinvqb_mask_512(<64 x i8> %passthru, <64 x i8> %src1, <64 x i8> %src2, i64 %mask) { +; CHECK-LABEL: test_gf2p8affineinvqb_mask_512: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8affineinvqb $3, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x49,0xcf,0xc2,0x03] +; CHECK-NEXT: vgf2p8affineinvqb $3, %zmm2, %zmm1, %zmm1 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xc9,0xcf,0xca,0x03] +; CHECK-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <64 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.512(<64 x i8> %src1, <64 x i8> %src2, i32 3, <64 x i8> %passthru, i64 %mask) + %2 = call <64 x i8> @llvm.x86.avx512.mask.vgf2p8affineinvqb.512(<64 x i8> %src1, <64 x i8> %src2, i32 3, <64 x i8> zeroinitializer, i64 %mask) + %3 = xor <64 x i8> %1, %2 + ret <64 x i8> %3 +} + 
+declare <16 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i32, <16 x i8>, i16) +define <16 x i8> @test_gf2p8affineqb_mask_128(<16 x i8> %passthru, <16 x i8> %src1, <16 x i8> %src2, i16 %mask) { +; CHECK-LABEL: test_gf2p8affineqb_mask_128: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8affineqb $3, %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0xce,0xc2,0x03] +; CHECK-NEXT: vgf2p8affineqb $3, %xmm2, %xmm1, %xmm1 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0x89,0xce,0xca,0x03] +; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i32 3, <16 x i8> %passthru, i16 %mask) + %2 = call <16 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i32 3, <16 x i8> zeroinitializer, i16 %mask) + %3 = xor <16 x i8> %1, %2 + ret <16 x i8> %3 +} + +declare <32 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i32, <32 x i8>, i32) +define <32 x i8> @test_gf2p8affineqb_mask_256(<32 x i8> %passthru, <32 x i8> %src1, <32 x i8> %src2, i32 %mask) { +; CHECK-LABEL: test_gf2p8affineqb_mask_256: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8affineqb $3, %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0xce,0xc2,0x03] +; CHECK-NEXT: vgf2p8affineqb $3, %ymm2, %ymm1, %ymm1 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0xce,0xca,0x03] +; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <32 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.256(<32 x i8> %src1, <32 x i8> %src2, i32 3, <32 x i8> %passthru, i32 %mask) + %2 = call <32 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.256(<32 x i8> %src1, <32 x i8> %src2, i32 3, <32 x 
i8> zeroinitializer, i32 %mask) + %3 = xor <32 x i8> %1, %2 + ret <32 x i8> %3 +} + +declare <64 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.512(<64 x i8>, <64 x i8>, i32, <64 x i8>, i64) +define <64 x i8> @test_gf2p8affineqb_mask_512(<64 x i8> %passthru, <64 x i8> %src1, <64 x i8> %src2, i64 %mask) { +; CHECK-LABEL: test_gf2p8affineqb_mask_512: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8affineqb $3, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x49,0xce,0xc2,0x03] +; CHECK-NEXT: vgf2p8affineqb $3, %zmm2, %zmm1, %zmm1 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xc9,0xce,0xca,0x03] +; CHECK-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <64 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.512(<64 x i8> %src1, <64 x i8> %src2, i32 3, <64 x i8> %passthru, i64 %mask) + %2 = call <64 x i8> @llvm.x86.avx512.mask.vgf2p8affineqb.512(<64 x i8> %src1, <64 x i8> %src2, i32 3, <64 x i8> zeroinitializer, i64 %mask) + %3 = xor <64 x i8> %1, %2 + ret <64 x i8> %3 +} + +declare <16 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.128(<16 x i8>, <16 x i8>, <16 x i8>, i16) +define <16 x i8> @test_gf2p8mulb_mask_128(<16 x i8> %passthru, <16 x i8> %src1, <16 x i8> %src2, i16 %mask) { +; CHECK-LABEL: test_gf2p8mulb_mask_128: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8mulb %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xcf,0xc2] +; CHECK-NEXT: vgf2p8mulb %xmm2, %xmm1, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xcf,0xca] +; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) + %2 = call <16 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> 
%src2, <16 x i8> zeroinitializer, i16 %mask) + %3 = xor <16 x i8> %1, %2 + ret <16 x i8> %3 +} + +declare <32 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) +define <32 x i8> @test_gf2p8mulb_mask_256(<32 x i8> %passthru, <32 x i8> %src1, <32 x i8> %src2, i32 %mask) { +; CHECK-LABEL: test_gf2p8mulb_mask_256: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8mulb %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0xcf,0xc2] +; CHECK-NEXT: vgf2p8mulb %ymm2, %ymm1, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0xcf,0xca] +; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <32 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) + %2 = call <32 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> zeroinitializer, i32 %mask) + %3 = xor <32 x i8> %1, %2 + ret <32 x i8> %3 +} + +declare <64 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) +define <64 x i8> @test_gf2p8mulb_mask_512(<64 x i8> %passthru, <64 x i8> %src1, <64 x i8> %src2, i64 %mask) { +; CHECK-LABEL: test_gf2p8mulb_mask_512: +; CHECK: ## BB#0: +; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf] +; CHECK-NEXT: vgf2p8mulb %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x49,0xcf,0xc2] +; CHECK-NEXT: vgf2p8mulb %zmm2, %zmm1, %zmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xcf,0xca] +; CHECK-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xef,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] + %1 = call <64 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) + %2 = call <64 x i8> @llvm.x86.avx512.mask.vgf2p8mulb.512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> zeroinitializer, i64 %mask) + %3 = 
xor <64 x i8> %1, %2 + ret <64 x i8> %3 +} + Index: test/CodeGen/X86/gfni-intrinsics.ll =================================================================== --- test/CodeGen/X86/gfni-intrinsics.ll +++ test/CodeGen/X86/gfni-intrinsics.ll @@ -0,0 +1,33 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+gfni -show-mc-encoding | FileCheck %s + +declare <16 x i8> @llvm.x86.gf2p8affineinvqb(<16 x i8>, <16 x i8>, i8) +define <16 x i8> @test_gf2p8affineinvqb(<16 x i8> %src1, <16 x i8> %src2) { +; CHECK-LABEL: test_gf2p8affineinvqb: +; CHECK: ## BB#0: +; CHECK-NEXT: gf2p8affineinvqb $11, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xcf,0xc1,0x0b] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.gf2p8affineinvqb(<16 x i8> %src1, <16 x i8> %src2, i8 11) + ret <16 x i8> %1 +} + +declare <16 x i8> @llvm.x86.gf2p8affineqb(<16 x i8>, <16 x i8>, i8) +define <16 x i8> @test_gf2p8affineqb(<16 x i8> %src1, <16 x i8> %src2) { +; CHECK-LABEL: test_gf2p8affineqb: +; CHECK: ## BB#0: +; CHECK-NEXT: gf2p8affineqb $11, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xce,0xc1,0x0b] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.gf2p8affineqb(<16 x i8> %src1, <16 x i8> %src2, i8 11) + ret <16 x i8> %1 +} + +declare <16 x i8> @llvm.x86.gf2p8mulb(<16 x i8>, <16 x i8>) +define <16 x i8> @test_gf2p8mulb(<16 x i8> %src1, <16 x i8> %src2) { +; CHECK-LABEL: test_gf2p8mulb: +; CHECK: ## BB#0: +; CHECK-NEXT: gf2p8mulb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0xcf,0xc1] +; CHECK-NEXT: retl ## encoding: [0xc3] + %1 = call <16 x i8> @llvm.x86.gf2p8mulb(<16 x i8> %src1, <16 x i8> %src2) + ret <16 x i8> %1 +} + Index: test/MC/X86/avx512gfni-encoding.s =================================================================== --- test/MC/X86/avx512gfni-encoding.s +++ test/MC/X86/avx512gfni-encoding.s @@ -0,0 +1,178 @@ +// RUN: llvm-mc -triple x86_64-unknown-unknown 
-mattr=+gfni,+avx512f,+avx512bw --show-encoding < %s | FileCheck %s + +// CHECK: vgf2p8affineinvqb $7, %zmm2, %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xcf,0xca,0x07] + vgf2p8affineinvqb $7, %zmm2, %zmm20, %zmm1 + +// CHECK: vgf2p8affineqb $7, %zmm2, %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xce,0xca,0x07] + vgf2p8affineqb $7, %zmm2, %zmm20, %zmm1 + +// CHECK: vgf2p8affineinvqb $7, %zmm2, %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xcf,0xca,0x07] + vgf2p8affineinvqb $7, %zmm2, %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, %zmm2, %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xce,0xca,0x07] + vgf2p8affineqb $7, %zmm2, %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, (%rcx), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx), %zmm20, %zmm1 + +// CHECK: vgf2p8affineinvqb $7, -256(%rsp), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xcf,0x4c,0x24,0xfc,0x07] + vgf2p8affineinvqb $7, -256(%rsp), %zmm20, %zmm1 + +// CHECK: vgf2p8affineinvqb $7, 256(%rsp), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xcf,0x4c,0x24,0x04,0x07] + vgf2p8affineinvqb $7, 256(%rsp), %zmm20, %zmm1 + +// CHECK: vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8affineqb $7, (%rcx), %zmm20, %zmm1 +// CHECK: encoding:
[0x62,0xf3,0xdd,0x40,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx), %zmm20, %zmm1 + +// CHECK: vgf2p8affineqb $7, -256(%rsp), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xce,0x4c,0x24,0xfc,0x07] + vgf2p8affineqb $7, -256(%rsp), %zmm20, %zmm1 + +// CHECK: vgf2p8affineqb $7, 256(%rsp), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x40,0xce,0x4c,0x24,0x04,0x07] + vgf2p8affineqb $7, 256(%rsp), %zmm20, %zmm1 + +// CHECK: vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x40,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8affineinvqb $7, (%rcx), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -256(%rsp), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xcf,0x4c,0x24,0xfc,0x07] + vgf2p8affineinvqb $7, -256(%rsp), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, 256(%rsp), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xcf,0x4c,0x24,0x04,0x07] + vgf2p8affineinvqb $7, 256(%rsp), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: 
[0x62,0xb3,0xdd,0x42,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, (%rcx), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -256(%rsp), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xce,0x4c,0x24,0xfc,0x07] + vgf2p8affineqb $7, -256(%rsp), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, 256(%rsp), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x42,0xce,0x4c,0x24,0x04,0x07] + vgf2p8affineqb $7, 256(%rsp), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x42,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8mulb %zmm2, %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x40,0xcf,0xca] + vgf2p8mulb %zmm2, %zmm20, %zmm1 + +// CHECK: vgf2p8mulb %zmm2, %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x42,0xcf,0xca] + vgf2p8mulb %zmm2, %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8mulb (%rcx), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x40,0xcf,0x09] + vgf2p8mulb (%rcx), %zmm20, %zmm1 + +// CHECK: 
vgf2p8mulb -256(%rsp), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x40,0xcf,0x4c,0x24,0xfc] + vgf2p8mulb -256(%rsp), %zmm20, %zmm1 + +// CHECK: vgf2p8mulb 256(%rsp), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x40,0xcf,0x4c,0x24,0x04] + vgf2p8mulb 256(%rsp), %zmm20, %zmm1 + +// CHECK: vgf2p8mulb 268435456(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x40,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + vgf2p8mulb 268435456(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8mulb -536870912(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x40,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] + vgf2p8mulb -536870912(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8mulb -536870910(%rcx,%r14,8), %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x40,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + vgf2p8mulb -536870910(%rcx,%r14,8), %zmm20, %zmm1 + +// CHECK: vgf2p8mulb (%rcx), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x42,0xcf,0x09] + vgf2p8mulb (%rcx), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8mulb -256(%rsp), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x42,0xcf,0x4c,0x24,0xfc] + vgf2p8mulb -256(%rsp), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8mulb 256(%rsp), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x42,0xcf,0x4c,0x24,0x04] + vgf2p8mulb 256(%rsp), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8mulb 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x42,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + vgf2p8mulb 268435456(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8mulb -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x42,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] + vgf2p8mulb -536870912(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8mulb -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x42,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + vgf2p8mulb -536870910(%rcx,%r14,8), %zmm20, %zmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, (%rcx){1to8}, 
%zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x50,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx){1to8}, %zmm20, %zmm1 + +// CHECK: vgf2p8affineqb $7, (%rcx){1to8}, %zmm20, %zmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x50,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx){1to8}, %zmm20, %zmm1 + Index: test/MC/X86/avx512vl_gfni-encoding.s =================================================================== --- test/MC/X86/avx512vl_gfni-encoding.s +++ test/MC/X86/avx512vl_gfni-encoding.s @@ -0,0 +1,354 @@ +// RUN: llvm-mc -triple x86_64-unknown-unknown -mattr=+gfni,+avx512vl,+avx512bw --show-encoding < %s | FileCheck %s + +// CHECK: vgf2p8affineinvqb $7, %xmm2, %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xcf,0xca,0x07] + vgf2p8affineinvqb $7, %xmm2, %xmm20, %xmm1 + +// CHECK: vgf2p8affineqb $7, %xmm2, %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xce,0xca,0x07] + vgf2p8affineqb $7, %xmm2, %xmm20, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, %xmm2, %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xcf,0xca,0x07] + vgf2p8affineinvqb $7, %xmm2, %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, %xmm2, %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xce,0xca,0x07] + vgf2p8affineqb $7, %xmm2, %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, (%rcx), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx), %xmm20, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, -64(%rsp), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xcf,0x4c,0x24,0xfc,0x07] + vgf2p8affineinvqb $7, -64(%rsp), %xmm20, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, 64(%rsp), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xcf,0x4c,0x24,0x04,0x07] + vgf2p8affineinvqb $7, 64(%rsp), %xmm20, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %xmm20, 
%xmm1 + +// CHECK: vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 + +// CHECK: vgf2p8affineqb $7, (%rcx), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx), %xmm20, %xmm1 + +// CHECK: vgf2p8affineqb $7, -64(%rsp), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xce,0x4c,0x24,0xfc,0x07] + vgf2p8affineqb $7, -64(%rsp), %xmm20, %xmm1 + +// CHECK: vgf2p8affineqb $7, 64(%rsp), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x00,0xce,0x4c,0x24,0x04,0x07] + vgf2p8affineqb $7, 64(%rsp), %xmm20, %xmm1 + +// CHECK: vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 + +// CHECK: vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 + +// CHECK: vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x00,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, (%rcx), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -64(%rsp), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xcf,0x4c,0x24,0xfc,0x07] + vgf2p8affineinvqb $7, -64(%rsp), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, 64(%rsp), %xmm20, 
%xmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xcf,0x4c,0x24,0x04,0x07] + vgf2p8affineinvqb $7, 64(%rsp), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, (%rcx), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -64(%rsp), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xce,0x4c,0x24,0xfc,0x07] + vgf2p8affineqb $7, -64(%rsp), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, 64(%rsp), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x02,0xce,0x4c,0x24,0x04,0x07] + vgf2p8affineqb $7, 64(%rsp), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x02,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, 
-536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, %ymm2, %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xcf,0xca,0x07] + vgf2p8affineinvqb $7, %ymm2, %ymm20, %ymm1 + +// CHECK: vgf2p8affineqb $7, %ymm2, %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xce,0xca,0x07] + vgf2p8affineqb $7, %ymm2, %ymm20, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, %ymm2, %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xcf,0xca,0x07] + vgf2p8affineinvqb $7, %ymm2, %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineqb $7, %ymm2, %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xce,0xca,0x07] + vgf2p8affineqb $7, %ymm2, %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, (%rcx), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx), %ymm20, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, -128(%rsp), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xcf,0x4c,0x24,0xfc,0x07] + vgf2p8affineinvqb $7, -128(%rsp), %ymm20, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, 128(%rsp), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xcf,0x4c,0x24,0x04,0x07] + vgf2p8affineinvqb $7, 128(%rsp), %ymm20, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8affineqb $7, (%rcx), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xce,0x09,0x07] + vgf2p8affineqb 
$7, (%rcx), %ymm20, %ymm1 + +// CHECK: vgf2p8affineqb $7, -128(%rsp), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xce,0x4c,0x24,0xfc,0x07] + vgf2p8affineqb $7, -128(%rsp), %ymm20, %ymm1 + +// CHECK: vgf2p8affineqb $7, 128(%rsp), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x20,0xce,0x4c,0x24,0x04,0x07] + vgf2p8affineqb $7, 128(%rsp), %ymm20, %ymm1 + +// CHECK: vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb3,0xdd,0x20,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, (%rcx), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -128(%rsp), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xcf,0x4c,0x24,0xfc,0x07] + vgf2p8affineinvqb $7, -128(%rsp), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, 128(%rsp), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xcf,0x4c,0x24,0x04,0x07] + vgf2p8affineinvqb $7, 128(%rsp), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, 
-536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineqb $7, (%rcx), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -128(%rsp), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xce,0x4c,0x24,0xfc,0x07] + vgf2p8affineqb $7, -128(%rsp), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineqb $7, 128(%rsp), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf3,0xdd,0x22,0xce,0x4c,0x24,0x04,0x07] + vgf2p8affineqb $7, 128(%rsp), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb3,0xdd,0x22,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8mulb %xmm2, %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x00,0xcf,0xca] + vgf2p8mulb %xmm2, %xmm20, %xmm1 + +// CHECK: vgf2p8mulb %xmm2, %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x02,0xcf,0xca] + vgf2p8mulb %xmm2, %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8mulb (%rcx), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x00,0xcf,0x09] + vgf2p8mulb (%rcx), %xmm20, %xmm1 + +// CHECK: vgf2p8mulb -64(%rsp), %xmm20, %xmm1 +// CHECK: encoding: 
[0x62,0xf2,0x5d,0x00,0xcf,0x4c,0x24,0xfc] + vgf2p8mulb -64(%rsp), %xmm20, %xmm1 + +// CHECK: vgf2p8mulb 64(%rsp), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x00,0xcf,0x4c,0x24,0x04] + vgf2p8mulb 64(%rsp), %xmm20, %xmm1 + +// CHECK: vgf2p8mulb 268435456(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x00,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + vgf2p8mulb 268435456(%rcx,%r14,8), %xmm20, %xmm1 + +// CHECK: vgf2p8mulb -536870912(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x00,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] + vgf2p8mulb -536870912(%rcx,%r14,8), %xmm20, %xmm1 + +// CHECK: vgf2p8mulb -536870910(%rcx,%r14,8), %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x00,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + vgf2p8mulb -536870910(%rcx,%r14,8), %xmm20, %xmm1 + +// CHECK: vgf2p8mulb (%rcx), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x02,0xcf,0x09] + vgf2p8mulb (%rcx), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8mulb -64(%rsp), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x02,0xcf,0x4c,0x24,0xfc] + vgf2p8mulb -64(%rsp), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8mulb 64(%rsp), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x02,0xcf,0x4c,0x24,0x04] + vgf2p8mulb 64(%rsp), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8mulb 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x02,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + vgf2p8mulb 268435456(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8mulb -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x02,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] + vgf2p8mulb -536870912(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8mulb -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x02,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + vgf2p8mulb -536870910(%rcx,%r14,8), %xmm20, %xmm1 {%k2} + +// CHECK: vgf2p8mulb %ymm2, %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x20,0xcf,0xca] + vgf2p8mulb 
%ymm2, %ymm20, %ymm1 + +// CHECK: vgf2p8mulb %ymm2, %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x22,0xcf,0xca] + vgf2p8mulb %ymm2, %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8mulb (%rcx), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x20,0xcf,0x09] + vgf2p8mulb (%rcx), %ymm20, %ymm1 + +// CHECK: vgf2p8mulb -128(%rsp), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x20,0xcf,0x4c,0x24,0xfc] + vgf2p8mulb -128(%rsp), %ymm20, %ymm1 + +// CHECK: vgf2p8mulb 128(%rsp), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf2,0x5d,0x20,0xcf,0x4c,0x24,0x04] + vgf2p8mulb 128(%rsp), %ymm20, %ymm1 + +// CHECK: vgf2p8mulb 268435456(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x20,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + vgf2p8mulb 268435456(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8mulb -536870912(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x20,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] + vgf2p8mulb -536870912(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8mulb -536870910(%rcx,%r14,8), %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xb2,0x5d,0x20,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + vgf2p8mulb -536870910(%rcx,%r14,8), %ymm20, %ymm1 + +// CHECK: vgf2p8mulb (%rcx), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x22,0xcf,0x09] + vgf2p8mulb (%rcx), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8mulb -128(%rsp), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x22,0xcf,0x4c,0x24,0xfc] + vgf2p8mulb -128(%rsp), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8mulb 128(%rsp), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xf2,0x5d,0x22,0xcf,0x4c,0x24,0x04] + vgf2p8mulb 128(%rsp), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8mulb 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x22,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + vgf2p8mulb 268435456(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8mulb -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x22,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] 
+ vgf2p8mulb -536870912(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8mulb -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2} +// CHECK: encoding: [0x62,0xb2,0x5d,0x22,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + vgf2p8mulb -536870910(%rcx,%r14,8), %ymm20, %ymm1 {%k2} + +// CHECK: vgf2p8affineinvqb $7, (%rcx){1to2}, %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x10,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx){1to2}, %xmm20, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, (%rcx){1to4}, %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x30,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx){1to4}, %ymm20, %ymm1 + +// CHECK: vgf2p8affineqb $7, (%rcx){1to2}, %xmm20, %xmm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x10,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx){1to2}, %xmm20, %xmm1 + +// CHECK: vgf2p8affineqb $7, (%rcx){1to4}, %ymm20, %ymm1 +// CHECK: encoding: [0x62,0xf3,0xdd,0x30,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx){1to4}, %ymm20, %ymm1 + Index: test/MC/X86/gfni-encoding.s =================================================================== --- test/MC/X86/gfni-encoding.s +++ test/MC/X86/gfni-encoding.s @@ -0,0 +1,254 @@ +// RUN: llvm-mc -triple x86_64-unknown-unknown -mattr=+gfni --show-encoding < %s | FileCheck %s + +// CHECK: gf2p8affineinvqb $7, %xmm2, %xmm1 +// CHECK: encoding: [0x66,0x0f,0x3a,0xcf,0xca,0x07] + gf2p8affineinvqb $7, %xmm2, %xmm1 + +// CHECK: gf2p8affineqb $7, %xmm2, %xmm1 +// CHECK: encoding: [0x66,0x0f,0x3a,0xce,0xca,0x07] + gf2p8affineqb $7, %xmm2, %xmm1 + +// CHECK: gf2p8affineinvqb $7, (%rcx), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x3a,0xcf,0x09,0x07] + gf2p8affineinvqb $7, (%rcx), %xmm1 + +// CHECK: gf2p8affineinvqb $7, -4(%rsp), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x3a,0xcf,0x4c,0x24,0xfc,0x07] + gf2p8affineinvqb $7, -4(%rsp), %xmm1 + +// CHECK: gf2p8affineinvqb $7, 4(%rsp), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x3a,0xcf,0x4c,0x24,0x04,0x07] + gf2p8affineinvqb $7, 4(%rsp), %xmm1 + +// CHECK: gf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %xmm1 
+// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + gf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %xmm1 + +// CHECK: gf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %xmm1 +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + gf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %xmm1 + +// CHECK: gf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %xmm1 +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + gf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %xmm1 + +// CHECK: gf2p8affineqb $7, (%rcx), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x3a,0xce,0x09,0x07] + gf2p8affineqb $7, (%rcx), %xmm1 + +// CHECK: gf2p8affineqb $7, -4(%rsp), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x3a,0xce,0x4c,0x24,0xfc,0x07] + gf2p8affineqb $7, -4(%rsp), %xmm1 + +// CHECK: gf2p8affineqb $7, 4(%rsp), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x3a,0xce,0x4c,0x24,0x04,0x07] + gf2p8affineqb $7, 4(%rsp), %xmm1 + +// CHECK: gf2p8affineqb $7, 268435456(%rcx,%r14,8), %xmm1 +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + gf2p8affineqb $7, 268435456(%rcx,%r14,8), %xmm1 + +// CHECK: gf2p8affineqb $7, -536870912(%rcx,%r14,8), %xmm1 +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + gf2p8affineqb $7, -536870912(%rcx,%r14,8), %xmm1 + +// CHECK: gf2p8affineqb $7, -536870910(%rcx,%r14,8), %xmm1 +// CHECK: encoding: [0x66,0x42,0x0f,0x3a,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + gf2p8affineqb $7, -536870910(%rcx,%r14,8), %xmm1 + +// CHECK: gf2p8mulb %xmm2, %xmm1 +// CHECK: encoding: [0x66,0x0f,0x38,0xcf,0xca] + gf2p8mulb %xmm2, %xmm1 + +// CHECK: gf2p8mulb (%rcx), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x38,0xcf,0x09] + gf2p8mulb (%rcx), %xmm1 + +// CHECK: gf2p8mulb -4(%rsp), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x38,0xcf,0x4c,0x24,0xfc] + gf2p8mulb -4(%rsp), %xmm1 + +// CHECK: gf2p8mulb 4(%rsp), %xmm1 +// CHECK: encoding: [0x66,0x0f,0x38,0xcf,0x4c,0x24,0x04] + 
gf2p8mulb 4(%rsp), %xmm1 + +// CHECK: gf2p8mulb 268435456(%rcx,%r14,8), %xmm1 +// CHECK: encoding: [0x66,0x42,0x0f,0x38,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + gf2p8mulb 268435456(%rcx,%r14,8), %xmm1 + +// CHECK: gf2p8mulb -536870912(%rcx,%r14,8), %xmm1 +// CHECK: encoding: [0x66,0x42,0x0f,0x38,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] + gf2p8mulb -536870912(%rcx,%r14,8), %xmm1 + +// CHECK: gf2p8mulb -536870910(%rcx,%r14,8), %xmm1 +// CHECK: encoding: [0x66,0x42,0x0f,0x38,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + gf2p8mulb -536870910(%rcx,%r14,8), %xmm1 + +// CHECK: vgf2p8affineinvqb $7, %xmm2, %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe3,0xa9,0xcf,0xca,0x07] + vgf2p8affineinvqb $7, %xmm2, %xmm10, %xmm1 + +// CHECK: vgf2p8affineqb $7, %xmm2, %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe3,0xa9,0xce,0xca,0x07] + vgf2p8affineqb $7, %xmm2, %xmm10, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, (%rcx), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe3,0xa9,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx), %xmm10, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, -4(%rsp), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe3,0xa9,0xcf,0x4c,0x24,0xfc,0x07] + vgf2p8affineinvqb $7, -4(%rsp), %xmm10, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, 4(%rsp), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe3,0xa9,0xcf,0x4c,0x24,0x04,0x07] + vgf2p8affineinvqb $7, 4(%rsp), %xmm10, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa3,0xa9,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa3,0xa9,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa3,0xa9,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: 
vgf2p8affineqb $7, (%rcx), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe3,0xa9,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx), %xmm10, %xmm1 + +// CHECK: vgf2p8affineqb $7, -4(%rsp), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe3,0xa9,0xce,0x4c,0x24,0xfc,0x07] + vgf2p8affineqb $7, -4(%rsp), %xmm10, %xmm1 + +// CHECK: vgf2p8affineqb $7, 4(%rsp), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe3,0xa9,0xce,0x4c,0x24,0x04,0x07] + vgf2p8affineqb $7, 4(%rsp), %xmm10, %xmm1 + +// CHECK: vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa3,0xa9,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa3,0xa9,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa3,0xa9,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: vgf2p8affineinvqb $7, %ymm2, %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe3,0xad,0xcf,0xca,0x07] + vgf2p8affineinvqb $7, %ymm2, %ymm10, %ymm1 + +// CHECK: vgf2p8affineqb $7, %ymm2, %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe3,0xad,0xce,0xca,0x07] + vgf2p8affineqb $7, %ymm2, %ymm10, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, (%rcx), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe3,0xad,0xcf,0x09,0x07] + vgf2p8affineinvqb $7, (%rcx), %ymm10, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, -4(%rsp), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe3,0xad,0xcf,0x4c,0x24,0xfc,0x07] + vgf2p8affineinvqb $7, -4(%rsp), %ymm10, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, 4(%rsp), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe3,0xad,0xcf,0x4c,0x24,0x04,0x07] + vgf2p8affineinvqb $7, 4(%rsp), %ymm10, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: 
[0xc4,0xa3,0xad,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineinvqb $7, 268435456(%rcx,%r14,8), %ymm10, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xa3,0xad,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870912(%rcx,%r14,8), %ymm10, %ymm1 + +// CHECK: vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xa3,0xad,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineinvqb $7, -536870910(%rcx,%r14,8), %ymm10, %ymm1 + +// CHECK: vgf2p8affineqb $7, (%rcx), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe3,0xad,0xce,0x09,0x07] + vgf2p8affineqb $7, (%rcx), %ymm10, %ymm1 + +// CHECK: vgf2p8affineqb $7, -4(%rsp), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe3,0xad,0xce,0x4c,0x24,0xfc,0x07] + vgf2p8affineqb $7, -4(%rsp), %ymm10, %ymm1 + +// CHECK: vgf2p8affineqb $7, 4(%rsp), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe3,0xad,0xce,0x4c,0x24,0x04,0x07] + vgf2p8affineqb $7, 4(%rsp), %ymm10, %ymm1 + +// CHECK: vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xa3,0xad,0xce,0x8c,0xf1,0x00,0x00,0x00,0x10,0x07] + vgf2p8affineqb $7, 268435456(%rcx,%r14,8), %ymm10, %ymm1 + +// CHECK: vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xa3,0xad,0xce,0x8c,0xf1,0x00,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870912(%rcx,%r14,8), %ymm10, %ymm1 + +// CHECK: vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xa3,0xad,0xce,0x8c,0xf1,0x02,0x00,0x00,0xe0,0x07] + vgf2p8affineqb $7, -536870910(%rcx,%r14,8), %ymm10, %ymm1 + +// CHECK: vgf2p8mulb %xmm2, %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe2,0x29,0xcf,0xca] + vgf2p8mulb %xmm2, %xmm10, %xmm1 + +// CHECK: vgf2p8mulb (%rcx), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe2,0x29,0xcf,0x09] + vgf2p8mulb (%rcx), %xmm10, %xmm1 + +// CHECK: vgf2p8mulb -4(%rsp), %xmm10, %xmm1 +// CHECK: encoding: 
[0xc4,0xe2,0x29,0xcf,0x4c,0x24,0xfc] + vgf2p8mulb -4(%rsp), %xmm10, %xmm1 + +// CHECK: vgf2p8mulb 4(%rsp), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xe2,0x29,0xcf,0x4c,0x24,0x04] + vgf2p8mulb 4(%rsp), %xmm10, %xmm1 + +// CHECK: vgf2p8mulb 268435456(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa2,0x29,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + vgf2p8mulb 268435456(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: vgf2p8mulb -536870912(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa2,0x29,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] + vgf2p8mulb -536870912(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: vgf2p8mulb -536870910(%rcx,%r14,8), %xmm10, %xmm1 +// CHECK: encoding: [0xc4,0xa2,0x29,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + vgf2p8mulb -536870910(%rcx,%r14,8), %xmm10, %xmm1 + +// CHECK: vgf2p8mulb %ymm2, %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe2,0x2d,0xcf,0xca] + vgf2p8mulb %ymm2, %ymm10, %ymm1 + +// CHECK: vgf2p8mulb (%rcx), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe2,0x2d,0xcf,0x09] + vgf2p8mulb (%rcx), %ymm10, %ymm1 + +// CHECK: vgf2p8mulb -4(%rsp), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe2,0x2d,0xcf,0x4c,0x24,0xfc] + vgf2p8mulb -4(%rsp), %ymm10, %ymm1 + +// CHECK: vgf2p8mulb 4(%rsp), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xe2,0x2d,0xcf,0x4c,0x24,0x04] + vgf2p8mulb 4(%rsp), %ymm10, %ymm1 + +// CHECK: vgf2p8mulb 268435456(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xa2,0x2d,0xcf,0x8c,0xf1,0x00,0x00,0x00,0x10] + vgf2p8mulb 268435456(%rcx,%r14,8), %ymm10, %ymm1 + +// CHECK: vgf2p8mulb -536870912(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xa2,0x2d,0xcf,0x8c,0xf1,0x00,0x00,0x00,0xe0] + vgf2p8mulb -536870912(%rcx,%r14,8), %ymm10, %ymm1 + +// CHECK: vgf2p8mulb -536870910(%rcx,%r14,8), %ymm10, %ymm1 +// CHECK: encoding: [0xc4,0xa2,0x2d,0xcf,0x8c,0xf1,0x02,0x00,0x00,0xe0] + vgf2p8mulb -536870910(%rcx,%r14,8), %ymm10, %ymm1 +