Index: include/llvm/IR/IntrinsicsX86.td
===================================================================
--- include/llvm/IR/IntrinsicsX86.td
+++ include/llvm/IR/IntrinsicsX86.td
@@ -5232,6 +5232,24 @@
                      Intrinsic<[llvm_v32i16_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
                                 llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_dbpsadbw_128 :
+        GCCBuiltin<"__builtin_ia32_dbpsadbw128_mask">,
+        Intrinsic<[llvm_v8i16_ty],
+                  [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_v8i16_ty,
+                   llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_dbpsadbw_256 :
+        GCCBuiltin<"__builtin_ia32_dbpsadbw256_mask">,
+        Intrinsic<[llvm_v16i16_ty],
+                  [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty, llvm_v16i16_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_dbpsadbw_512 :
+        GCCBuiltin<"__builtin_ia32_dbpsadbw512_mask">,
+        Intrinsic<[llvm_v32i16_ty],
+                  [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty, llvm_v32i16_ty,
+                   llvm_i32_ty], [IntrNoMem]>;
 }

 // Gather and Scatter ops
@@ -5632,15 +5650,35 @@
 }

 let TargetPrefix = "x86" in {
-  def int_x86_avx512_mask_valign_q_512 : GCCBuiltin<"__builtin_ia32_alignq512_mask">,
+  def int_x86_avx512_mask_valign_q_512 :
+        GCCBuiltin<"__builtin_ia32_alignq512_mask">,
         Intrinsic<[llvm_v8i64_ty],
-        [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_i8_ty],
-        [IntrNoMem]>;
+                  [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty,
+                   llvm_i8_ty], [IntrNoMem]>;

-  def int_x86_avx512_mask_valign_d_512 : GCCBuiltin<"__builtin_ia32_alignd512_mask">,
+  def int_x86_avx512_mask_valign_d_512 :
+        GCCBuiltin<"__builtin_ia32_alignd512_mask">,
         Intrinsic<[llvm_v16i32_ty],
-        [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i8_ty, llvm_v16i32_ty, llvm_i16_ty],
-        [IntrNoMem]>;
+                  [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_v16i32_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_palignr_128 :
+        GCCBuiltin<"__builtin_ia32_palignr128_mask">,
+        Intrinsic<[llvm_v16i8_ty],
+                  [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty,
+                   llvm_i16_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_palignr_256 :
+        GCCBuiltin<"__builtin_ia32_palignr256_mask">,
+        Intrinsic<[llvm_v32i8_ty],
+                  [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty, llvm_v32i8_ty,
+                   llvm_i32_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_palignr_512 :
+        GCCBuiltin<"__builtin_ia32_palignr512_mask">,
+        Intrinsic<[llvm_v64i8_ty],
+                  [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty, llvm_v64i8_ty,
+                   llvm_i64_ty], [IntrNoMem]>;
 }

 // Compares
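For orientation (an editorial note, not part of the patch): the new intrinsics take the shift/control immediate as i32 rather than i8, and the mask operand width follows the element count of the result vector. A minimal LLVM IR sketch, assuming only the declarations introduced above; the function name is illustrative:

; Calling the 512-bit forms with imm = 2 under merge-masking.
declare <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8>, <64 x i8>, i32, <32 x i16>, i32)
declare <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8>, <64 x i8>, i32, <64 x i8>, i64)

define <32 x i16> @dbpsadbw_example(<64 x i8> %a, <64 x i8> %b, <32 x i16> %passthru, i32 %mask) {
  ; v64i8 sources produce a v32i16 result; %mask selects result words,
  ; %passthru supplies the remaining elements
  %r = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %a, <64 x i8> %b, i32 2, <32 x i16> %passthru, i32 %mask)
  ret <32 x i16> %r
}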
Index: lib/Target/X86/X86ISelLowering.h
===================================================================
--- lib/Target/X86/X86ISelLowering.h
+++ lib/Target/X86/X86ISelLowering.h
@@ -182,6 +182,8 @@
       /// Compute Sum of Absolute Differences.
       PSADBW,
+      /// Compute Double Block Packed Sum-Absolute-Differences.
+      DBPSADBW,

       /// Bitwise Logical AND NOT of Packed FP values.
       ANDNP,
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -19232,6 +19232,7 @@
   case X86ISD::PMULUDQ:            return "X86ISD::PMULUDQ";
   case X86ISD::PMULDQ:             return "X86ISD::PMULDQ";
   case X86ISD::PSADBW:             return "X86ISD::PSADBW";
+  case X86ISD::DBPSADBW:           return "X86ISD::DBPSADBW";
   case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
   case X86ISD::VAARG_64:           return "X86ISD::VAARG_64";
   case X86ISD::WIN_ALLOCA:         return "X86ISD::WIN_ALLOCA";
Index: lib/Target/X86/X86InstrAVX512.td
===================================================================
--- lib/Target/X86/X86InstrAVX512.td
+++ lib/Target/X86/X86InstrAVX512.td
@@ -6443,30 +6443,40 @@
 //handle instruction  reg_vec1 = op(reg_vec2,reg_vec3,imm)
 //                               op(reg_vec2,mem_vec,imm)
+multiclass avx512_3Op_rm_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                              X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo>{
+
+  defm rri : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
+                  (ins SrcInfo.RC:$src1, SrcInfo.RC:$src2, u8imm:$src3),
+                  OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+                  (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
+                               (SrcInfo.VT SrcInfo.RC:$src2),
+                               (i32 imm:$src3)))>;
+  let mayLoad = 1 in
+    defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+                  (ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3),
+                  OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+                  (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
+                               (SrcInfo.VT (bitconvert
+                                        (SrcInfo.LdFrag addr:$src2))),
+                               (i32 imm:$src3)))>;
+}
+
+//handle instruction  reg_vec1 = op(reg_vec2,reg_vec3,imm)
+//                               op(reg_vec2,mem_vec,imm)
 //                               op(reg_vec2,broadcast(eltVt),imm)
 multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
-                           X86VectorVTInfo _>{
-  defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
-                  (ins _.RC:$src1, _.RC:$src2, u8imm:$src3),
-                  OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
-                  (_.VT (OpNode (_.VT _.RC:$src1),
-                                (_.VT _.RC:$src2),
-                                (i8 imm:$src3)))>;
-  let mayLoad = 1 in {
-    defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
-                  (ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3),
-                  OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
-                  (_.VT (OpNode (_.VT _.RC:$src1),
-                                (_.VT (bitconvert (_.LdFrag addr:$src2))),
-                                (i8 imm:$src3)))>;
+                           X86VectorVTInfo _>:
+  avx512_3Op_rm_imm8<opc, OpcodeStr, OpNode, _, _>{
+
+  let mayLoad = 1 in
     defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                   (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
                   OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
                   "$src1, ${src2}"##_.BroadcastStr##", $src3",
                   (OpNode (_.VT _.RC:$src1),
                           (X86VBroadcast(_.ScalarLdFrag addr:$src2)),
-                          (i8 imm:$src3))>, EVEX_B;
-  }
+                          (i32 imm:$src3))>, EVEX_B;
 }

//handle scalar instruction  reg_vec1 = op(reg_vec2,reg_vec3,imm)
@@ -6542,6 +6552,20 @@
   }
 }

+multiclass avx512_common_3Op_rm_imm8<bits<8> opc, SDNode OpNode, string OpStr,
+                    AVX512VLVectorVTInfo DestInfo, AVX512VLVectorVTInfo SrcInfo>{
+  let Predicates = [HasBWI] in {
+    defm Z    : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info512,
+                      SrcInfo.info512>, EVEX_V512, AVX512AIi8Base, EVEX_4V;
+  }
+  let Predicates = [HasBWI, HasVLX] in {
+    defm Z128 : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info128,
+                      SrcInfo.info128>, EVEX_V128, AVX512AIi8Base, EVEX_4V;
+    defm Z256 : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info256,
+                      SrcInfo.info256>, EVEX_V256, AVX512AIi8Base, EVEX_4V;
+  }
+}
+
 multiclass avx512_common_3Op_imm8<string OpcodeStr, AVX512VLVectorVTInfo _,
                                   bits<8> opc, SDNode OpNode>{
   let Predicates = [HasAVX512] in {
@@ -6665,6 +6689,12 @@
 defm VALIGNQ: avx512_valign<"valignq", avx512vl_i64_info, avx512vl_f64_info>,
                 EVEX_CD8<64, CD8VF>, VEX_W;

+defm VPALIGN: avx512_common_3Op_rm_imm8<0x0F, X86PAlignr, "vpalignr" ,
+                avx512vl_i8_info, avx512vl_i8_info>, EVEX_CD8<8, CD8VF>;
+
+defm VDBPSADBW: avx512_common_3Op_rm_imm8<0x42, X86dbpsadbw, "vdbpsadbw" ,
+                avx512vl_i16_info, avx512vl_i8_info>, EVEX_CD8<8, CD8VF>;
+
 multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            X86VectorVTInfo _> {
   defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
Index: lib/Target/X86/X86InstrFragmentsSIMD.td
===================================================================
--- lib/Target/X86/X86InstrFragmentsSIMD.td
+++ lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -84,6 +84,9 @@
 def X86psadbw  : SDNode<"X86ISD::PSADBW",
                         SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                              SDTCisSameAs<0,2>]>>;
+def X86dbpsadbw : SDNode<"X86ISD::DBPSADBW",
+                         SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisVec<1>,
+                                              SDTCisSameAs<1,2>, SDTCisInt<3>]>>;
 def X86andnp   : SDNode<"X86ISD::ANDNP",
                         SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                              SDTCisSameAs<0,2>]>>;
@@ -256,9 +259,9 @@
 def STDFp3SrcRm : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>, SDTCisVec<0>,
                                        SDTCisInt<3>, SDTCisInt<4>]>;

-def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;
-def X86VAlign : SDNode<"X86ISD::VALIGN", SDTShuff3OpI>;
-def X86Abs : SDNode<"X86ISD::ABS", SDTIntUnaryOp>;
+def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;
+def X86VAlign  : SDNode<"X86ISD::VALIGN",  SDTShuff3OpI>;
+def X86Abs     : SDNode<"X86ISD::ABS",     SDTIntUnaryOp>;

 def X86PShufd  : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
 def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
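Background on the X86PAlignr fragment above (an editorial sketch, not part of the patch): within each 128-bit lane, palignr concatenates src1 (high) and src2 (low) and shifts the pair right by the byte immediate. Assuming that semantics, the unmasked 128-bit case with imm = 2 is equivalent to the following shufflevector, where %a stands for src1 (the first intrinsic operand) and %b for src2:

define <16 x i8> @palignr2_equiv(<16 x i8> %a, <16 x i8> %b) {
  ; result byte i = byte (i + 2) of the 32-byte concatenation %a:%b;
  ; shuffle indices 2..15 read %b, indices 16..17 read the low bytes of %a
  %r = shufflevector <16 x i8> %b, <16 x i8> %a,
         <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9,
                     i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
  ret <16 x i8> %r
}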
Index: lib/Target/X86/X86IntrinsicsInfo.h
===================================================================
--- lib/Target/X86/X86IntrinsicsInfo.h
+++ lib/Target/X86/X86IntrinsicsInfo.h
@@ -610,7 +610,12 @@
                      ISD::UINT_TO_FP, 0),
   X86_INTRINSIC_DATA(avx512_mask_cvtuqq2ps_512, INTR_TYPE_1OP_MASK,
                      ISD::UINT_TO_FP, ISD::UINT_TO_FP),
-
+  X86_INTRINSIC_DATA(avx512_mask_dbpsadbw_128, INTR_TYPE_3OP_MASK,
+                     X86ISD::DBPSADBW, 0),
+  X86_INTRINSIC_DATA(avx512_mask_dbpsadbw_256, INTR_TYPE_3OP_MASK,
+                     X86ISD::DBPSADBW, 0),
+  X86_INTRINSIC_DATA(avx512_mask_dbpsadbw_512, INTR_TYPE_3OP_MASK,
+                     X86ISD::DBPSADBW, 0),
   X86_INTRINSIC_DATA(avx512_mask_div_pd_128, INTR_TYPE_2OP_MASK, ISD::FDIV, 0),
   X86_INTRINSIC_DATA(avx512_mask_div_pd_256, INTR_TYPE_2OP_MASK, ISD::FDIV, 0),
   X86_INTRINSIC_DATA(avx512_mask_div_pd_512, INTR_TYPE_2OP_MASK, ISD::FDIV,
@@ -753,6 +758,9 @@
   X86_INTRINSIC_DATA(avx512_mask_paddus_w_128, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
   X86_INTRINSIC_DATA(avx512_mask_paddus_w_256, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
   X86_INTRINSIC_DATA(avx512_mask_paddus_w_512, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
+  X86_INTRINSIC_DATA(avx512_mask_palignr_128, INTR_TYPE_3OP_MASK, X86ISD::PALIGNR, 0),
+  X86_INTRINSIC_DATA(avx512_mask_palignr_256, INTR_TYPE_3OP_MASK, X86ISD::PALIGNR, 0),
+  X86_INTRINSIC_DATA(avx512_mask_palignr_512, INTR_TYPE_3OP_MASK, X86ISD::PALIGNR, 0),
   X86_INTRINSIC_DATA(avx512_mask_pand_d_128, INTR_TYPE_2OP_MASK, ISD::AND, 0),
   X86_INTRINSIC_DATA(avx512_mask_pand_d_256, INTR_TYPE_2OP_MASK, ISD::AND, 0),
   X86_INTRINSIC_DATA(avx512_mask_pand_d_512, INTR_TYPE_2OP_MASK, ISD::AND, 0),
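Each INTR_TYPE_3OP_MASK entry lowers the intrinsic to the named X86ISD node followed by a select on the mask operand. A simplified IR sketch of that merge-masking step, shown for the 128-bit dbpsadbw result type (illustrative only; the actual lowering operates on DAG nodes, not IR, and the function name is hypothetical):

define <8 x i16> @mask_merge_sketch(<8 x i16> %op_result, <8 x i16> %passthru, i8 %mask) {
  ; one mask bit per result element; zero-masking ({z} in the tests below)
  ; is the same select with a zeroinitializer passthrough
  %m = bitcast i8 %mask to <8 x i1>
  %r = select <8 x i1> %m, <8 x i16> %op_result, <8 x i16> %passthru
  ret <8 x i16> %r
}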
Index: test/CodeGen/X86/avx512-intrinsics.ll
===================================================================
--- test/CodeGen/X86/avx512-intrinsics.ll
+++ test/CodeGen/X86/avx512-intrinsics.ll
@@ -566,27 +566,27 @@
 define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_valign_q:
 ; CHECK: valignq $2, %zmm1, %zmm0, %zmm0
-  %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i8 2, <8 x i64> zeroinitializer, i8 -1)
+  %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> zeroinitializer, i8 -1)
   ret <8 x i64> %res
 }

 define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src, i8 %mask) {
 ; CHECK-LABEL: test_mask_valign_q:
 ; CHECK: valignq $2, %zmm1, %zmm0, %zmm2 {%k1}
-  %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i8 2, <8 x i64> %src, i8 %mask)
+  %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> %src, i8 %mask)
   ret <8 x i64> %res
 }

-declare <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64>, <8 x i64>, i8, <8 x i64>, i8)
+declare <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64>, <8 x i64>, i32, <8 x i64>, i8)

 define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
 ; CHECK-LABEL: test_maskz_valign_d:
 ; CHECK: valignd $5, %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xc9,0x03,0xc1,0x05]
-  %res = call <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32> %a, <16 x i32> %b, i8 5, <16 x i32> zeroinitializer, i16 %mask)
+  %res = call <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32> %a, <16 x i32> %b, i32 5, <16 x i32> zeroinitializer, i16 %mask)
   ret <16 x i32> %res
 }

-declare <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32>, <16 x i32>, i8, <16 x i32>, i16)
+declare <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32>, <16 x i32>, i32, <16 x i32>, i16)

 define void @test_mask_store_ss(i8* %ptr, <4 x float> %data, i8 %mask) {
 ; CHECK-LABEL: test_mask_store_ss
Index: test/CodeGen/X86/avx512bw-intrinsics.ll
===================================================================
--- test/CodeGen/X86/avx512bw-intrinsics.ll
+++ test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -1180,4 +1180,44 @@
   %res1 = call <32 x i16> @llvm.x86.avx512.mask.punpcklw.d.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
   %res2 = add <32 x i16> %res, %res1
   ret <32 x i16> %res2
-}
\ No newline at end of file
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8>, <64 x i8>, i32, <32 x i16>, i32)
+
+define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) {
+; CHECK-LABEL: test_int_x86_avx512_mask_dbpsadbw_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
+; CHECK-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; CHECK-NEXT: vpaddw %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+  %res = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> %x3, i32 %x4)
+  %res1 = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> zeroinitializer, i32 %x4)
+  %res2 = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> %x3, i32 -1)
+  %res3 = add <32 x i16> %res, %res1
+  %res4 = add <32 x i16> %res3, %res2
+  ret <32 x i16> %res4
+}
+
+declare <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8>, <64 x i8>, i32, <64 x i8>, i64)
+
+define <64 x i8>@test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3, i64 %x4) {
+; CHECK-LABEL: test_int_x86_avx512_mask_palignr_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovq %rdi, %k1
+; CHECK-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
+; CHECK-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vpaddb %zmm3, %zmm2, %zmm1
+; CHECK-NEXT: vpaddb %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+  %res = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> %x3, i64 %x4)
+  %res1 = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> zeroinitializer, i64 %x4)
+  %res2 = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> %x3, i64 -1)
+  %res3 = add <64 x i8> %res, %res1
+  %res4 = add <64 x i8> %res3, %res2
+  ret <64 x i8> %res4
+}
Index: test/CodeGen/X86/avx512bwvl-intrinsics.ll
===================================================================
--- test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -4194,3 +4194,84 @@
   %res2 = add <16 x i16> %res, %res1
   ret <16 x i16> %res2
 }
+
+declare <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8>, <16 x i8>, i32, <8 x i16>, i8)
+
+define <8 x i16>@test_int_x86_avx512_mask_dbpsadbw_128(<16 x i8> %x0, <16 x i8> %x1, <8 x i16> %x3, i8 %x4) {
+; CHECK-LABEL: test_int_x86_avx512_mask_dbpsadbw_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: retq
+  %res = call <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <8 x i16> %x3, i8 %x4)
+  %res1 = call <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <8 x i16> zeroinitializer, i8 %x4)
+  %res2 = call <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <8 x i16> %x3, i8 -1)
+  %res3 = add <8 x i16> %res, %res1
+  %res4 = add <8 x i16> %res2, %res3
+  ret <8 x i16> %res4
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.dbpsadbw.256(<32 x i8>, <32 x i8>, i32, <16 x i16>, i16)
+
+define <16 x i16>@test_int_x86_avx512_mask_dbpsadbw_256(<32 x i8> %x0, <32 x i8> %x1, <16 x i16> %x3, i16 %x4) {
+; CHECK-LABEL: test_int_x86_avx512_mask_dbpsadbw_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm3 {%k1} {z}
+; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm1
+; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %res = call <16 x i16> @llvm.x86.avx512.mask.dbpsadbw.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <16 x i16> %x3, i16 %x4)
+  %res1 = call <16 x i16> @llvm.x86.avx512.mask.dbpsadbw.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <16 x i16> zeroinitializer, i16 %x4)
+  %res2 = call <16 x i16> @llvm.x86.avx512.mask.dbpsadbw.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <16 x i16> %x3, i16 -1)
+  %res3 = add <16 x i16> %res, %res1
+  %res4 = add <16 x i16> %res3, %res2
+  ret <16 x i16> %res4
+}
+
+declare <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8>, <16 x i8>, i32, <16 x i8>, i16)
+
+define <16 x i8>@test_int_x86_avx512_mask_palignr_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x3, i16 %x4) {
+; CHECK-LABEL: test_int_x86_avx512_mask_palignr_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpaddb %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %res = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> %x3, i16 %x4)
+  %res1 = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> zeroinitializer, i16 %x4)
+  %res2 = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> %x3, i16 -1)
+  %res3 = add <16 x i8> %res, %res1
+  %res4 = add <16 x i8> %res3, %res2
+  ret <16 x i8> %res4
+}
+
+declare <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8>, <32 x i8>, i32, <32 x i8>, i32)
+
+define <32 x i8>@test_int_x86_avx512_mask_palignr_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x3, i32 %x4) {
+; CHECK-LABEL: test_int_x86_avx512_mask_palignr_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm3 {%k1} {z}
+; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpaddb %ymm3, %ymm2, %ymm1
+; CHECK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %res = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> %x3, i32 %x4)
+  %res1 = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> zeroinitializer, i32 %x4)
+  %res2 = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> %x3, i32 -1)
+  %res3 = add <32 x i8> %res, %res1
+  %res4 = add <32 x i8> %res3, %res2
+  ret <32 x i8> %res4
+}
Index: test/MC/X86/x86-64-avx512bw.s
===================================================================
--- test/MC/X86/x86-64-avx512bw.s
+++ test/MC/X86/x86-64-avx512bw.s
@@ -4112,3 +4112,83 @@
 // CHECK: encoding: [0x62,0x61,0x2d,0x40,0x69,0xb2,0xc0,0xdf,0xff,0xff]
           vpunpckhwd -8256(%rdx), %zmm26, %zmm30

+// CHECK: vpalignr $171, %zmm17, %zmm26, %zmm22
+// CHECK: encoding: [0x62,0xa3,0x2d,0x40,0x0f,0xf1,0xab]
+          vpalignr $171, %zmm17, %zmm26, %zmm22
+
+// CHECK: vpalignr $171, %zmm17, %zmm26, %zmm22 {%k3}
+// CHECK: encoding: [0x62,0xa3,0x2d,0x43,0x0f,0xf1,0xab]
+          vpalignr $171, %zmm17, %zmm26, %zmm22 {%k3}
+
+// CHECK: vpalignr $171, %zmm17, %zmm26, %zmm22 {%k3} {z}
+// CHECK: encoding: [0x62,0xa3,0x2d,0xc3,0x0f,0xf1,0xab]
+          vpalignr $171, %zmm17, %zmm26, %zmm22 {%k3} {z}
+
+// CHECK: vpalignr $123, %zmm17, %zmm26, %zmm22
+// CHECK: encoding: [0x62,0xa3,0x2d,0x40,0x0f,0xf1,0x7b]
+          vpalignr $123, %zmm17, %zmm26, %zmm22
+
+// CHECK: vpalignr $123, (%rcx), %zmm26, %zmm22
+// CHECK: encoding: [0x62,0xe3,0x2d,0x40,0x0f,0x31,0x7b]
+          vpalignr $123, (%rcx), %zmm26, %zmm22
+
+// CHECK: vpalignr $123, 291(%rax,%r14,8), %zmm26, %zmm22
+// CHECK: encoding: [0x62,0xa3,0x2d,0x40,0x0f,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpalignr $123, 291(%rax,%r14,8), %zmm26, %zmm22
+
+// CHECK: vpalignr $123, 8128(%rdx), %zmm26, %zmm22
+// CHECK: encoding: [0x62,0xe3,0x2d,0x40,0x0f,0x72,0x7f,0x7b]
+          vpalignr $123, 8128(%rdx), %zmm26, %zmm22
+
+// CHECK: vpalignr $123, 8192(%rdx), %zmm26, %zmm22
+// CHECK: encoding: [0x62,0xe3,0x2d,0x40,0x0f,0xb2,0x00,0x20,0x00,0x00,0x7b]
+          vpalignr $123, 8192(%rdx), %zmm26, %zmm22
+
+// CHECK: vpalignr $123, -8192(%rdx), %zmm26, %zmm22
+// CHECK: encoding: [0x62,0xe3,0x2d,0x40,0x0f,0x72,0x80,0x7b]
+          vpalignr $123, -8192(%rdx), %zmm26, %zmm22
+
+// CHECK: vpalignr $123, -8256(%rdx), %zmm26, %zmm22
+// CHECK: encoding: [0x62,0xe3,0x2d,0x40,0x0f,0xb2,0xc0,0xdf,0xff,0xff,0x7b]
+          vpalignr $123, -8256(%rdx), %zmm26, %zmm22
+
+// CHECK: vdbpsadbw $171, %zmm18, %zmm20, %zmm21
+// CHECK: encoding: [0x62,0xa3,0x5d,0x40,0x42,0xea,0xab]
+          vdbpsadbw $171, %zmm18, %zmm20, %zmm21
+
+// CHECK: vdbpsadbw $171, %zmm18, %zmm20, %zmm21 {%k1}
+// CHECK: encoding: [0x62,0xa3,0x5d,0x41,0x42,0xea,0xab]
+          vdbpsadbw $171, %zmm18, %zmm20, %zmm21 {%k1}
+
+// CHECK: vdbpsadbw $171, %zmm18, %zmm20, %zmm21 {%k1} {z}
+// CHECK: encoding: [0x62,0xa3,0x5d,0xc1,0x42,0xea,0xab]
+          vdbpsadbw $171, %zmm18, %zmm20, %zmm21 {%k1} {z}
+
+// CHECK: vdbpsadbw $123, %zmm18, %zmm20, %zmm21
+// CHECK: encoding: [0x62,0xa3,0x5d,0x40,0x42,0xea,0x7b]
+          vdbpsadbw $123, %zmm18, %zmm20, %zmm21
+
+// CHECK: vdbpsadbw $123, (%rcx), %zmm20, %zmm21
+// CHECK: encoding: [0x62,0xe3,0x5d,0x40,0x42,0x29,0x7b]
+          vdbpsadbw $123, (%rcx), %zmm20, %zmm21
+
+// CHECK: vdbpsadbw $123, 291(%rax,%r14,8), %zmm20, %zmm21
+// CHECK: encoding: [0x62,0xa3,0x5d,0x40,0x42,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vdbpsadbw $123, 291(%rax,%r14,8), %zmm20, %zmm21
+
+// CHECK: vdbpsadbw $123, 8128(%rdx), %zmm20, %zmm21
+// CHECK: encoding: [0x62,0xe3,0x5d,0x40,0x42,0x6a,0x7f,0x7b]
+          vdbpsadbw $123, 8128(%rdx), %zmm20, %zmm21
+
+// CHECK: vdbpsadbw $123, 8192(%rdx), %zmm20, %zmm21
+// CHECK: encoding: [0x62,0xe3,0x5d,0x40,0x42,0xaa,0x00,0x20,0x00,0x00,0x7b]
+          vdbpsadbw $123, 8192(%rdx), %zmm20, %zmm21
+
+// CHECK: vdbpsadbw $123, -8192(%rdx), %zmm20, %zmm21
+// CHECK: encoding: [0x62,0xe3,0x5d,0x40,0x42,0x6a,0x80,0x7b]
+          vdbpsadbw $123, -8192(%rdx), %zmm20, %zmm21
+
+// CHECK: vdbpsadbw $123, -8256(%rdx), %zmm20, %zmm21
+// CHECK: encoding: [0x62,0xe3,0x5d,0x40,0x42,0xaa,0xc0,0xdf,0xff,0xff,0x7b]
+          vdbpsadbw $123, -8256(%rdx), %zmm20, %zmm21
+
Index: test/MC/X86/x86-64-avx512bw_vl.s
===================================================================
--- test/MC/X86/x86-64-avx512bw_vl.s
+++ test/MC/X86/x86-64-avx512bw_vl.s
@@ -8399,3 +8399,323 @@
 // CHECK: encoding: [0x62,0x61,0x35,0x20,0x69,0xa2,0xe0,0xef,0xff,0xff]
           vpunpckhwd -4128(%rdx), %ymm25, %ymm28

+// CHECK: vpalignr $171, %xmm21, %xmm26, %xmm19
+// CHECK: encoding: [0x62,0xa3,0x2d,0x00,0x0f,0xdd,0xab]
+          vpalignr $171, %xmm21, %xmm26, %xmm19
+
+// CHECK: vpalignr $171, %xmm21, %xmm26, %xmm19 {%k4}
+// CHECK: encoding: [0x62,0xa3,0x2d,0x04,0x0f,0xdd,0xab]
+          vpalignr $171, %xmm21, %xmm26, %xmm19 {%k4}
+
+// CHECK: vpalignr $171, %xmm21, %xmm26, %xmm19 {%k4} {z}
+// CHECK: encoding: [0x62,0xa3,0x2d,0x84,0x0f,0xdd,0xab]
+          vpalignr $171, %xmm21, %xmm26, %xmm19 {%k4} {z}
+
+// CHECK: vpalignr $123, %xmm21, %xmm26, %xmm19
+// CHECK: encoding: [0x62,0xa3,0x2d,0x00,0x0f,0xdd,0x7b]
+          vpalignr $123, %xmm21, %xmm26, %xmm19
+
+// CHECK: vpalignr $123, (%rcx), %xmm26, %xmm19
+// CHECK: encoding: [0x62,0xe3,0x2d,0x00,0x0f,0x19,0x7b]
+          vpalignr $123, (%rcx), %xmm26, %xmm19
+
+// CHECK: vpalignr $123, 291(%rax,%r14,8), %xmm26, %xmm19
+// CHECK: encoding: [0x62,0xa3,0x2d,0x00,0x0f,0x9c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpalignr $123, 291(%rax,%r14,8), %xmm26, %xmm19
+
+// CHECK: vpalignr $123, 2032(%rdx), %xmm26, %xmm19
+// CHECK: encoding: [0x62,0xe3,0x2d,0x00,0x0f,0x5a,0x7f,0x7b]
+          vpalignr $123, 2032(%rdx), %xmm26, %xmm19
+
+// CHECK: vpalignr $123, 2048(%rdx), %xmm26, %xmm19
+// CHECK: encoding: [0x62,0xe3,0x2d,0x00,0x0f,0x9a,0x00,0x08,0x00,0x00,0x7b]
+          vpalignr $123, 2048(%rdx), %xmm26, %xmm19
+
+// CHECK: vpalignr $123, -2048(%rdx), %xmm26, %xmm19
+// CHECK: encoding: [0x62,0xe3,0x2d,0x00,0x0f,0x5a,0x80,0x7b]
+          vpalignr $123, -2048(%rdx), %xmm26, %xmm19
+
+// CHECK: vpalignr $123, -2064(%rdx), %xmm26, %xmm19
+// CHECK: encoding: [0x62,0xe3,0x2d,0x00,0x0f,0x9a,0xf0,0xf7,0xff,0xff,0x7b]
+          vpalignr $123, -2064(%rdx), %xmm26, %xmm19
+
+// CHECK: vpalignr $171, %ymm22, %ymm21, %ymm27
+// CHECK: encoding: [0x62,0x23,0x55,0x20,0x0f,0xde,0xab]
+          vpalignr $171, %ymm22, %ymm21, %ymm27
+
+// CHECK: vpalignr $171, %ymm22, %ymm21, %ymm27 {%k2}
+// CHECK: encoding: [0x62,0x23,0x55,0x22,0x0f,0xde,0xab]
+          vpalignr $171, %ymm22, %ymm21, %ymm27 {%k2}
+
+// CHECK: vpalignr $171, %ymm22, %ymm21, %ymm27 {%k2} {z}
+// CHECK: encoding: [0x62,0x23,0x55,0xa2,0x0f,0xde,0xab]
+          vpalignr $171, %ymm22, %ymm21, %ymm27 {%k2} {z}
+
+// CHECK: vpalignr $123, %ymm22, %ymm21, %ymm27
+// CHECK: encoding: [0x62,0x23,0x55,0x20,0x0f,0xde,0x7b]
+          vpalignr $123, %ymm22, %ymm21, %ymm27
+
+// CHECK: vpalignr $123, (%rcx), %ymm21, %ymm27
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x0f,0x19,0x7b]
+          vpalignr $123, (%rcx), %ymm21, %ymm27
+
+// CHECK: vpalignr $123, 291(%rax,%r14,8), %ymm21, %ymm27
+// CHECK: encoding: [0x62,0x23,0x55,0x20,0x0f,0x9c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpalignr $123, 291(%rax,%r14,8), %ymm21, %ymm27
+
+// CHECK: vpalignr $123, 4064(%rdx), %ymm21, %ymm27
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x0f,0x5a,0x7f,0x7b]
+          vpalignr $123, 4064(%rdx), %ymm21, %ymm27
+
+// CHECK: vpalignr $123, 4096(%rdx), %ymm21, %ymm27
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x0f,0x9a,0x00,0x10,0x00,0x00,0x7b]
+          vpalignr $123, 4096(%rdx), %ymm21, %ymm27
+
+// CHECK: vpalignr $123, -4096(%rdx), %ymm21, %ymm27
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x0f,0x5a,0x80,0x7b]
+          vpalignr $123, -4096(%rdx), %ymm21, %ymm27
+
+// CHECK: vpalignr $123, -4128(%rdx), %ymm21, %ymm27
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x0f,0x9a,0xe0,0xef,0xff,0xff,0x7b]
+          vpalignr $123, -4128(%rdx), %ymm21, %ymm27
+
+// CHECK: vdbpsadbw $171, %xmm17, %xmm25, %xmm22
+// CHECK: encoding: [0x62,0xa3,0x35,0x00,0x42,0xf1,0xab]
+          vdbpsadbw $171, %xmm17, %xmm25, %xmm22
+
+// CHECK: vdbpsadbw $171, %xmm17, %xmm25, %xmm22 {%k3}
+// CHECK: encoding: [0x62,0xa3,0x35,0x03,0x42,0xf1,0xab]
+          vdbpsadbw $171, %xmm17, %xmm25, %xmm22 {%k3}
+
+// CHECK: vdbpsadbw $171, %xmm17, %xmm25, %xmm22 {%k3} {z}
+// CHECK: encoding: [0x62,0xa3,0x35,0x83,0x42,0xf1,0xab]
+          vdbpsadbw $171, %xmm17, %xmm25, %xmm22 {%k3} {z}
+
+// CHECK: vdbpsadbw $123, %xmm17, %xmm25, %xmm22
+// CHECK: encoding: [0x62,0xa3,0x35,0x00,0x42,0xf1,0x7b]
+          vdbpsadbw $123, %xmm17, %xmm25, %xmm22
+
+// CHECK: vdbpsadbw $123, (%rcx), %xmm25, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x42,0x31,0x7b]
+          vdbpsadbw $123, (%rcx), %xmm25, %xmm22
+
+// CHECK: vdbpsadbw $123, 291(%rax,%r14,8), %xmm25, %xmm22
+// CHECK: encoding: [0x62,0xa3,0x35,0x00,0x42,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vdbpsadbw $123, 291(%rax,%r14,8), %xmm25, %xmm22
+
+// CHECK: vdbpsadbw $123, 2032(%rdx), %xmm25, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x42,0x72,0x7f,0x7b]
+          vdbpsadbw $123, 2032(%rdx), %xmm25, %xmm22
+
+// CHECK: vdbpsadbw $123, 2048(%rdx), %xmm25, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x42,0xb2,0x00,0x08,0x00,0x00,0x7b]
+          vdbpsadbw $123, 2048(%rdx), %xmm25, %xmm22
+
+// CHECK: vdbpsadbw $123, -2048(%rdx), %xmm25, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x42,0x72,0x80,0x7b]
+          vdbpsadbw $123, -2048(%rdx), %xmm25, %xmm22
+
+// CHECK: vdbpsadbw $123, -2064(%rdx), %xmm25, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x42,0xb2,0xf0,0xf7,0xff,0xff,0x7b]
+          vdbpsadbw $123, -2064(%rdx), %xmm25, %xmm22
+
+// CHECK: vdbpsadbw $171, %ymm20, %ymm19, %ymm17
+// CHECK: encoding: [0x62,0xa3,0x65,0x20,0x42,0xcc,0xab]
+          vdbpsadbw $171, %ymm20, %ymm19, %ymm17
+
+// CHECK: vdbpsadbw $171, %ymm20, %ymm19, %ymm17 {%k5}
+// CHECK: encoding: [0x62,0xa3,0x65,0x25,0x42,0xcc,0xab]
+          vdbpsadbw $171, %ymm20, %ymm19, %ymm17 {%k5}
+
+// CHECK: vdbpsadbw $171, %ymm20, %ymm19, %ymm17 {%k5} {z}
+// CHECK: encoding: [0x62,0xa3,0x65,0xa5,0x42,0xcc,0xab]
+          vdbpsadbw $171, %ymm20, %ymm19, %ymm17 {%k5} {z}
+
+// CHECK: vdbpsadbw $123, %ymm20, %ymm19, %ymm17
+// CHECK: encoding: [0x62,0xa3,0x65,0x20,0x42,0xcc,0x7b]
+          vdbpsadbw $123, %ymm20, %ymm19, %ymm17
+
+// CHECK: vdbpsadbw $123, (%rcx), %ymm19, %ymm17
+// CHECK: encoding: [0x62,0xe3,0x65,0x20,0x42,0x09,0x7b]
+          vdbpsadbw $123, (%rcx), %ymm19, %ymm17
+
+// CHECK: vdbpsadbw $123, 291(%rax,%r14,8), %ymm19, %ymm17
+// CHECK: encoding: [0x62,0xa3,0x65,0x20,0x42,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vdbpsadbw $123, 291(%rax,%r14,8), %ymm19, %ymm17
+
+// CHECK: vdbpsadbw $123, 4064(%rdx), %ymm19, %ymm17
+// CHECK: encoding: [0x62,0xe3,0x65,0x20,0x42,0x4a,0x7f,0x7b]
+          vdbpsadbw $123, 4064(%rdx), %ymm19, %ymm17
+
+// CHECK: vdbpsadbw $123, 4096(%rdx), %ymm19, %ymm17
+// CHECK: encoding: [0x62,0xe3,0x65,0x20,0x42,0x8a,0x00,0x10,0x00,0x00,0x7b]
+          vdbpsadbw $123, 4096(%rdx), %ymm19, %ymm17
+
+// CHECK: vdbpsadbw $123, -4096(%rdx), %ymm19, %ymm17
+// CHECK: encoding: [0x62,0xe3,0x65,0x20,0x42,0x4a,0x80,0x7b]
+          vdbpsadbw $123, -4096(%rdx), %ymm19, %ymm17
+
+// CHECK: vdbpsadbw $123, -4128(%rdx), %ymm19, %ymm17
+// CHECK: encoding: [0x62,0xe3,0x65,0x20,0x42,0x8a,0xe0,0xef,0xff,0xff,0x7b]
+          vdbpsadbw $123, -4128(%rdx), %ymm19, %ymm17
+
+// CHECK: vpalignr $171, %xmm25, %xmm20, %xmm30
+// CHECK: encoding: [0x62,0x03,0x5d,0x00,0x0f,0xf1,0xab]
+          vpalignr $0xab, %xmm25, %xmm20, %xmm30
+
+// CHECK: vpalignr $171, %xmm25, %xmm20, %xmm30 {%k2}
+// CHECK: encoding: [0x62,0x03,0x5d,0x02,0x0f,0xf1,0xab]
+          vpalignr $0xab, %xmm25, %xmm20, %xmm30 {%k2}
+
+// CHECK: vpalignr $171, %xmm25, %xmm20, %xmm30 {%k2} {z}
+// CHECK: encoding: [0x62,0x03,0x5d,0x82,0x0f,0xf1,0xab]
+          vpalignr $0xab, %xmm25, %xmm20, %xmm30 {%k2} {z}
+
+// CHECK: vpalignr $123, %xmm25, %xmm20, %xmm30
+// CHECK: encoding: [0x62,0x03,0x5d,0x00,0x0f,0xf1,0x7b]
+          vpalignr $0x7b, %xmm25, %xmm20, %xmm30
+
+// CHECK: vpalignr $123, (%rcx), %xmm20, %xmm30
+// CHECK: encoding: [0x62,0x63,0x5d,0x00,0x0f,0x31,0x7b]
+          vpalignr $0x7b,(%rcx), %xmm20, %xmm30
+
+// CHECK: vpalignr $123, 4660(%rax,%r14,8), %xmm20, %xmm30
+// CHECK: encoding: [0x62,0x23,0x5d,0x00,0x0f,0xb4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vpalignr $0x7b,4660(%rax,%r14,8), %xmm20, %xmm30
+
+// CHECK: vpalignr $123, 2032(%rdx), %xmm20, %xmm30
+// CHECK: encoding: [0x62,0x63,0x5d,0x00,0x0f,0x72,0x7f,0x7b]
+          vpalignr $0x7b,2032(%rdx), %xmm20, %xmm30
+
+// CHECK: vpalignr $123, 2048(%rdx), %xmm20, %xmm30
+// CHECK: encoding: [0x62,0x63,0x5d,0x00,0x0f,0xb2,0x00,0x08,0x00,0x00,0x7b]
+          vpalignr $0x7b,2048(%rdx), %xmm20, %xmm30
+
+// CHECK: vpalignr $123, -2048(%rdx), %xmm20, %xmm30
+// CHECK: encoding: [0x62,0x63,0x5d,0x00,0x0f,0x72,0x80,0x7b]
+          vpalignr $0x7b,-2048(%rdx), %xmm20, %xmm30
+
+// CHECK: vpalignr $123, -2064(%rdx), %xmm20, %xmm30
+// CHECK: encoding: [0x62,0x63,0x5d,0x00,0x0f,0xb2,0xf0,0xf7,0xff,0xff,0x7b]
+          vpalignr $0x7b,-2064(%rdx), %xmm20, %xmm30
+
+// CHECK: vpalignr $171, %ymm27, %ymm17, %ymm21
+// CHECK: encoding: [0x62,0x83,0x75,0x20,0x0f,0xeb,0xab]
+          vpalignr $0xab, %ymm27, %ymm17, %ymm21
+
+// CHECK: vpalignr $171, %ymm27, %ymm17, %ymm21 {%k7}
+// CHECK: encoding: [0x62,0x83,0x75,0x27,0x0f,0xeb,0xab]
+          vpalignr $0xab, %ymm27, %ymm17, %ymm21 {%k7}
+
+// CHECK: vpalignr $171, %ymm27, %ymm17, %ymm21 {%k7} {z}
+// CHECK: encoding: [0x62,0x83,0x75,0xa7,0x0f,0xeb,0xab]
+          vpalignr $0xab, %ymm27, %ymm17, %ymm21 {%k7} {z}
+
+// CHECK: vpalignr $123, %ymm27, %ymm17, %ymm21
+// CHECK: encoding: [0x62,0x83,0x75,0x20,0x0f,0xeb,0x7b]
+          vpalignr $0x7b, %ymm27, %ymm17, %ymm21
+
+// CHECK: vpalignr $123, (%rcx), %ymm17, %ymm21
+// CHECK: encoding: [0x62,0xe3,0x75,0x20,0x0f,0x29,0x7b]
+          vpalignr $0x7b,(%rcx), %ymm17, %ymm21
+
+// CHECK: vpalignr $123, 4660(%rax,%r14,8), %ymm17, %ymm21
+// CHECK: encoding: [0x62,0xa3,0x75,0x20,0x0f,0xac,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vpalignr $0x7b,4660(%rax,%r14,8), %ymm17, %ymm21
+
+// CHECK: vpalignr $123, 4064(%rdx), %ymm17, %ymm21
+// CHECK: encoding: [0x62,0xe3,0x75,0x20,0x0f,0x6a,0x7f,0x7b]
+          vpalignr $0x7b,4064(%rdx), %ymm17, %ymm21
+
+// CHECK: vpalignr $123, 4096(%rdx), %ymm17, %ymm21
+// CHECK: encoding: [0x62,0xe3,0x75,0x20,0x0f,0xaa,0x00,0x10,0x00,0x00,0x7b]
+          vpalignr $0x7b,4096(%rdx), %ymm17, %ymm21
+
+// CHECK: vpalignr $123, -4096(%rdx), %ymm17, %ymm21
+// CHECK: encoding: [0x62,0xe3,0x75,0x20,0x0f,0x6a,0x80,0x7b]
+          vpalignr $0x7b,-4096(%rdx), %ymm17, %ymm21
+
+// CHECK: vpalignr $123, -4128(%rdx), %ymm17, %ymm21
+// CHECK: encoding: [0x62,0xe3,0x75,0x20,0x0f,0xaa,0xe0,0xef,0xff,0xff,0x7b]
+          vpalignr $0x7b,-4128(%rdx), %ymm17, %ymm21
+
+// CHECK: vdbpsadbw $171, %xmm20, %xmm29, %xmm17
+// CHECK: encoding: [0x62,0xa3,0x15,0x00,0x42,0xcc,0xab]
+          vdbpsadbw $0xab, %xmm20, %xmm29, %xmm17
+
+// CHECK: vdbpsadbw $171, %xmm20, %xmm29, %xmm17 {%k4}
+// CHECK: encoding: [0x62,0xa3,0x15,0x04,0x42,0xcc,0xab]
+          vdbpsadbw $0xab, %xmm20, %xmm29, %xmm17 {%k4}
+
+// CHECK: vdbpsadbw $171, %xmm20, %xmm29, %xmm17 {%k4} {z}
+// CHECK: encoding: [0x62,0xa3,0x15,0x84,0x42,0xcc,0xab]
+          vdbpsadbw $0xab, %xmm20, %xmm29, %xmm17 {%k4} {z}
+
+// CHECK: vdbpsadbw $123, %xmm20, %xmm29, %xmm17
+// CHECK: encoding: [0x62,0xa3,0x15,0x00,0x42,0xcc,0x7b]
+          vdbpsadbw $0x7b, %xmm20, %xmm29, %xmm17
+
+// CHECK: vdbpsadbw $123, (%rcx), %xmm29, %xmm17
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x42,0x09,0x7b]
+          vdbpsadbw $0x7b,(%rcx), %xmm29, %xmm17
+
+// CHECK: vdbpsadbw $123, 4660(%rax,%r14,8), %xmm29, %xmm17
+// CHECK: encoding: [0x62,0xa3,0x15,0x00,0x42,0x8c,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vdbpsadbw $0x7b,4660(%rax,%r14,8), %xmm29, %xmm17
+
+// CHECK: vdbpsadbw $123, 2032(%rdx), %xmm29, %xmm17
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x42,0x4a,0x7f,0x7b]
+          vdbpsadbw $0x7b,2032(%rdx), %xmm29, %xmm17
+
+// CHECK: vdbpsadbw $123, 2048(%rdx), %xmm29, %xmm17
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x42,0x8a,0x00,0x08,0x00,0x00,0x7b]
+          vdbpsadbw $0x7b,2048(%rdx), %xmm29, %xmm17
+
+// CHECK: vdbpsadbw $123, -2048(%rdx), %xmm29, %xmm17
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x42,0x4a,0x80,0x7b]
+          vdbpsadbw $0x7b,-2048(%rdx), %xmm29, %xmm17
+
+// CHECK: vdbpsadbw $123, -2064(%rdx), %xmm29, %xmm17
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x42,0x8a,0xf0,0xf7,0xff,0xff,0x7b]
+          vdbpsadbw $0x7b,-2064(%rdx), %xmm29, %xmm17
+
+// CHECK: vdbpsadbw $171, %ymm26, %ymm28, %ymm26
+// CHECK: encoding: [0x62,0x03,0x1d,0x20,0x42,0xd2,0xab]
+          vdbpsadbw $0xab, %ymm26, %ymm28, %ymm26
+
+// CHECK: vdbpsadbw $171, %ymm26, %ymm28, %ymm26 {%k4}
+// CHECK: encoding: [0x62,0x03,0x1d,0x24,0x42,0xd2,0xab]
+          vdbpsadbw $0xab, %ymm26, %ymm28, %ymm26 {%k4}
+
+// CHECK: vdbpsadbw $171, %ymm26, %ymm28, %ymm26 {%k4} {z}
+// CHECK: encoding: [0x62,0x03,0x1d,0xa4,0x42,0xd2,0xab]
+          vdbpsadbw $0xab, %ymm26, %ymm28, %ymm26 {%k4} {z}
+
+// CHECK: vdbpsadbw $123, %ymm26, %ymm28, %ymm26
+// CHECK: encoding: [0x62,0x03,0x1d,0x20,0x42,0xd2,0x7b]
+          vdbpsadbw $0x7b, %ymm26, %ymm28, %ymm26
+
+// CHECK: vdbpsadbw $123, (%rcx), %ymm28, %ymm26
+// CHECK: encoding: [0x62,0x63,0x1d,0x20,0x42,0x11,0x7b]
+          vdbpsadbw $0x7b,(%rcx), %ymm28, %ymm26
+
+// CHECK: vdbpsadbw $123, 4660(%rax,%r14,8), %ymm28, %ymm26
+// CHECK: encoding: [0x62,0x23,0x1d,0x20,0x42,0x94,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vdbpsadbw $0x7b,4660(%rax,%r14,8), %ymm28, %ymm26
+
+// CHECK: vdbpsadbw $123, 4064(%rdx), %ymm28, %ymm26
+// CHECK: encoding: [0x62,0x63,0x1d,0x20,0x42,0x52,0x7f,0x7b]
+          vdbpsadbw $0x7b,4064(%rdx), %ymm28, %ymm26
+
+// CHECK: vdbpsadbw $123, 4096(%rdx), %ymm28, %ymm26
+// CHECK: encoding: [0x62,0x63,0x1d,0x20,0x42,0x92,0x00,0x10,0x00,0x00,0x7b]
+          vdbpsadbw $0x7b,4096(%rdx), %ymm28, %ymm26
+
+// CHECK: vdbpsadbw $123, -4096(%rdx), %ymm28, %ymm26
+// CHECK: encoding: [0x62,0x63,0x1d,0x20,0x42,0x52,0x80,0x7b]
+          vdbpsadbw $0x7b,-4096(%rdx), %ymm28, %ymm26
+
+// CHECK: vdbpsadbw $123, -4128(%rdx), %ymm28, %ymm26
+// CHECK: encoding: [0x62,0x63,0x1d,0x20,0x42,0x92,0xe0,0xef,0xff,0xff,0x7b]
+          vdbpsadbw $0x7b,-4128(%rdx), %ymm28, %ymm26
+