Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -1607,6 +1607,10 @@
       setOperationAction(ISD::CONCAT_VECTORS,     MVT::v64i1, Legal);
       setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v32i1, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v64i1, Custom);
+      setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v32i16, Custom);
+      setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v64i8, Custom);
+      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
+      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
       setOperationAction(ISD::SELECT,             MVT::v32i1, Custom);
       setOperationAction(ISD::SELECT,             MVT::v64i1, Custom);
       setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i8, Custom);
@@ -1618,6 +1622,8 @@
       setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
       setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v32i1, Custom);
       setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v64i1, Custom);
+      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v32i16, Custom);
+      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v64i8, Custom);
       setOperationAction(ISD::VSELECT,            MVT::v32i16, Legal);
       setOperationAction(ISD::VSELECT,            MVT::v64i8, Legal);
       setOperationAction(ISD::TRUNCATE,           MVT::v32i1, Custom);
Index: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td
@@ -677,53 +677,36 @@
                            From.RC:$src1, imm:$idx)>;
 }
 
-// This multiclass generates patterns for matching vextract with common types
-// (X86VectorVTInfo From , X86VectorVTInfo To) and alternative types
-// (X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo)
-multiclass vextract_for_size_all<int Opcode, X86VectorVTInfo From,
-                                 X86VectorVTInfo To, X86VectorVTInfo AltFrom,
-                                 X86VectorVTInfo AltTo, PatFrag vextract_extract,
-                                 SDNodeXForm EXTRACT_get_vextract_imm> :
-  vextract_for_size<Opcode, From, To, vextract_extract>,
-  vextract_for_size_first_position_lowering<From, To> {
+// Codegen pattern for the alternative types
+multiclass vextract_for_size_lowering<string InstrStr, X86VectorVTInfo From,
+                                      X86VectorVTInfo To,
+                                      PatFrag vextract_extract,
+                                      SDNodeXForm EXTRACT_get_vextract_imm,
+                                      list<Predicate> p> :
+  vextract_for_size_first_position_lowering<From, To> {
 
-  // Codegen pattern with the alternative types.
-  // Only add this if operation not supported natively via AVX512DQ
-  let Predicates = [NoDQI] in
-  def : Pat<(vextract_extract:$ext (AltFrom.VT AltFrom.RC:$src1), (iPTR imm)),
-            (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x" #
-                                          To.NumElts # From.ZSuffix # "rr")
-                          AltFrom.RC:$src1,
-                          (EXTRACT_get_vextract_imm To.RC:$ext)))>;
+  let Predicates = p in
+  def : Pat<(vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm)),
+            (To.VT (!cast<Instruction>(InstrStr#"rr")
+                      From.RC:$src1,
+                      (EXTRACT_get_vextract_imm To.RC:$ext)))>;
 }
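// NOTE: illustrative expansion (a sketch, not verbatim TableGen output).
// One of the instantiations added further below,
//
//   defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v32i16_info,
//             v8i16x_info, vextract128_extract,
//             EXTRACT_get_vextract128_imm, [HasAVX512]>;
//
// produces a pattern equivalent to:
//
//   let Predicates = [HasAVX512] in
//   def : Pat<(vextract128_extract:$ext (v32i16 VR512:$src1), (iPTR imm)),
//             (v8i16 (VEXTRACTI32x4Zrr VR512:$src1,
//                       (EXTRACT_get_vextract128_imm VR128X:$ext)))>;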
 
 multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
                              ValueType EltVT64, int Opcode256> {
-  defm NAME # "32x4Z" : vextract_for_size_all<Opcode128,
+  defm NAME # "32x4Z" : vextract_for_size<Opcode128,
                                  X86VectorVTInfo<16, EltVT32, VR512>,
                                  X86VectorVTInfo< 4, EltVT32, VR128X>,
-                                 X86VectorVTInfo< 8, EltVT64, VR512>,
-                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
-                                 vextract128_extract,
-                                 EXTRACT_get_vextract128_imm>,
+                                 vextract128_extract>,
                                      EVEX_V512, EVEX_CD8<32, CD8VT4>;
-  defm NAME # "64x4Z" : vextract_for_size_all<Opcode256,
+  defm NAME # "64x4Z" : vextract_for_size<Opcode256,
                                  X86VectorVTInfo< 8, EltVT64, VR512>,
                                  X86VectorVTInfo< 4, EltVT64, VR256X>,
-                                 X86VectorVTInfo<16, EltVT32, VR512>,
-                                 X86VectorVTInfo< 8, EltVT32, VR256>,
-                                 vextract256_extract,
-                                 EXTRACT_get_vextract256_imm>,
+                                 vextract256_extract>,
                                      VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT4>;
   let Predicates = [HasVLX] in
-    defm NAME # "32x4Z256" : vextract_for_size_all<Opcode128,
+    defm NAME # "32x4Z256" : vextract_for_size<Opcode128,
                                  X86VectorVTInfo< 8, EltVT32, VR256X>,
                                  X86VectorVTInfo< 4, EltVT32, VR128X>,
-                                 X86VectorVTInfo< 4, EltVT64, VR256X>,
-                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
-                                 vextract128_extract,
-                                 EXTRACT_get_vextract128_imm>,
+                                 vextract128_extract>,
                                      EVEX_V256, EVEX_CD8<32, CD8VT4>;
   let Predicates = [HasVLX, HasDQI] in
     defm NAME # "64x2Z256" : vextract_for_size<Opcode128,
                                  X86VectorVTInfo< 4, EltVT64, VR256X>,
                                  X86VectorVTInfo< 2, EltVT64, VR128X>,
                                  vextract128_extract>,
                                      VEX_W, EVEX_V256, EVEX_CD8<64, CD8VT2>;
 }
 
 defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
 defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;
 
+// extract_subvector codegen patterns with the alternative types.
+// Only add this if 64x2 and its friends are not supported natively via AVX512DQ.
+defm : vextract_for_size_lowering<"VEXTRACTF32x4Z", v8f64_info, v2f64x_info,
+          vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v8i64_info, v2i64x_info,
+          vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;
+
+defm : vextract_for_size_lowering<"VEXTRACTF64x4Z", v16f32_info, v8f32x_info,
+          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512, NoDQI]>;
+defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v16i32_info, v8i32x_info,
+          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512, NoDQI]>;
+
+defm : vextract_for_size_lowering<"VEXTRACTF32x4Z256", v4f64x_info, v2f64x_info,
+          vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v4i64x_info, v2i64x_info,
+          vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
+
+// Codegen patterns with the alternative types: extract VEC128 from VEC512
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v32i16_info, v8i16x_info,
+          vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v64i8_info, v16i8x_info,
+          vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
+// Codegen patterns with the alternative types: extract VEC256 from VEC512
+defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v32i16_info, v16i16x_info,
+          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
+defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v64i8_info, v32i8x_info,
+          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
+
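// NOTE: usage sketch (mirrored by the new avx512-extract-subvector.ll test in
// this patch). With the v32i16/v64i8 patterns above, an upper-half subvector
// extract written in IR as
//
//   %r1 = shufflevector <32 x i16> %x, <32 x i16> undef,
//           <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21,
//                       i32 22, i32 23, i32 24, i32 25, i32 26, i32 27,
//                       i32 28, i32 29, i32 30, i32 31>
//
// now selects to a single "vextracti64x4 $1, %zmm0, %ymm0" on AVX512BW
// targets such as SKX.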
 // A 128-bit subvector insert to the first 512-bit vector position
 // is a subregister copy that needs no instruction.
 def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
@@ -6940,6 +6951,118 @@
                                     SSE_INTALU_ITINS_P, HasAVX512>;
 defm VPUNPCKHQDQ : avx512_binop_rm_vl_q<0x6D, "vpunpckhqdq", X86Unpckh,
                                         SSE_INTALU_ITINS_P, HasAVX512>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - Extract & Insert Integer Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                                   X86VectorVTInfo _> {
+  let mayStore = 1 in
+    def mr : AVX512Ii8<opc, MRMDestMem, (outs),
+                       (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
+                       OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                       [(store (_.EltVT (trunc (assertzext (OpNode
+                                                 (_.VT _.RC:$src1),
+                                                 imm:$src2)))), addr:$dst)]>,
+                       EVEX, EVEX_CD8<_.EltSize, CD8VT1>;
+}
+
+multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
+  let Predicates = [HasBWI] in {
+    def rr : AVX512Ii8<0x14, MRMDestReg, (outs GR32orGR64:$dst),
+                       (ins _.RC:$src1, u8imm:$src2),
+                       OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                       [(set GR32orGR64:$dst,
+                             (X86pextrb (_.VT _.RC:$src1), imm:$src2))]>,
+                       EVEX, TAPD;
+
+    defm NAME : avx512_extract_elt_bw_m<0x14, OpcodeStr, X86pextrb, _>, TAPD;
+  }
+}
+
+multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
+  let Predicates = [HasBWI] in {
+    def rr : AVX512Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst),
+                       (ins _.RC:$src1, u8imm:$src2),
+                       OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                       [(set GR32orGR64:$dst,
+                             (X86pextrw (_.VT _.RC:$src1), imm:$src2))]>,
+                       EVEX, PD;
+
+    defm NAME : avx512_extract_elt_bw_m<0x15, OpcodeStr, X86pextrw, _>, TAPD;
+  }
+}
+
+multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
+                                 RegisterClass GRC> {
+  let Predicates = [HasDQI] in {
+    def rr : AVX512Ii8<0x16, MRMDestReg, (outs GRC:$dst),
+                       (ins _.RC:$src1, u8imm:$src2),
+                       OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                       [(set GRC:$dst,
+                           (extractelt (_.VT _.RC:$src1), imm:$src2))]>,
+                       EVEX, TAPD;
+
+    let mayStore = 1 in
+      def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
+                         (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
+                         OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                         [(store (extractelt (_.VT _.RC:$src1),
+                                             imm:$src2),addr:$dst)]>,
+                         EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD;
+  }
+}
+
+defm VPEXTRBZ : avx512_extract_elt_b<"vpextrb", v16i8x_info>;
+defm VPEXTRWZ : avx512_extract_elt_w<"vpextrw", v8i16x_info>;
+defm VPEXTRDZ : avx512_extract_elt_dq<"vpextrd", v4i32x_info, GR32>;
+defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, VEX_W;
+
+multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                               X86VectorVTInfo _, PatFrag LdFrag> {
+  def rm : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
+      (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
+      OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+      [(set _.RC:$dst,
+          (_.VT (OpNode _.RC:$src1, (LdFrag addr:$src2), imm:$src3)))]>,
+      EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
+}
+
+multiclass avx512_insert_elt_bw<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                                X86VectorVTInfo _, PatFrag LdFrag> {
+  let Predicates = [HasBWI] in {
+    def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
+        (ins _.RC:$src1, GR32orGR64:$src2, u8imm:$src3),
+        OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+        [(set _.RC:$dst,
+            (OpNode _.RC:$src1, GR32orGR64:$src2, imm:$src3))]>, EVEX_4V;
+
+    defm NAME : avx512_insert_elt_m<opc, OpcodeStr, OpNode, _, LdFrag>;
+  }
+}
+
+multiclass avx512_insert_elt_dq<bits<8> opc, string OpcodeStr,
+                                X86VectorVTInfo _, RegisterClass GRC> {
+  let Predicates = [HasDQI] in {
+    def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
+        (ins _.RC:$src1, GRC:$src2, u8imm:$src3),
+        OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+        [(set _.RC:$dst,
+            (_.VT (insertelt _.RC:$src1, GRC:$src2, imm:$src3)))]>,
+        EVEX_4V, TAPD;
+
+    defm NAME : avx512_insert_elt_m<opc, OpcodeStr, insertelt, _,
+                                    _.ScalarLdFrag>, TAPD;
+  }
+}
+
+defm VPINSRBZ : avx512_insert_elt_bw<0x20, "vpinsrb", X86pinsrb, v16i8x_info,
+                                     extloadi8>, TAPD;
+defm VPINSRWZ : avx512_insert_elt_bw<0xC4, "vpinsrw", X86pinsrw, v8i16x_info,
+                                     extloadi16>, PD;
+defm VPINSRDZ : avx512_insert_elt_dq<0x22, "vpinsrd", v4i32x_info, GR32>;
+defm VPINSRQZ : avx512_insert_elt_dq<0x22, "vpinsrq", v2i64x_info, GR64>, VEX_W;
 //===----------------------------------------------------------------------===//
 // VSHUFPS - VSHUFPD Operations
 //===----------------------------------------------------------------------===//
Index: llvm/trunk/lib/Target/X86/X86InstrSSE.td
=================================================================== --- llvm/trunk/lib/Target/X86/X86InstrSSE.td +++ llvm/trunk/lib/Target/X86/X86InstrSSE.td @@ -4611,7 +4611,7 @@ } // Extract -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoBWI] in def VPEXTRWri : Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2), "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ -4626,7 +4626,7 @@ Sched<[WriteShuffleLd, ReadAfterLd]>; // Insert -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoBWI] in defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V; let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in @@ -6156,7 +6156,7 @@ imm:$src2)))), addr:$dst)]>; } -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoBWI] in defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX; defm PEXTRB : SS41I_extract8<0x14, "pextrb">; @@ -6181,7 +6181,7 @@ imm:$src2)))), addr:$dst)]>; } -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoBWI] in defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX; defm PEXTRW : SS41I_extract16<0x15, "pextrw">; @@ -6205,7 +6205,7 @@ addr:$dst)]>; } -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoDQI] in defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX; defm PEXTRD : SS41I_extract32<0x16, "pextrd">; @@ -6228,7 +6228,7 @@ addr:$dst)]>, REX_W; } -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoDQI] in defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W; defm PEXTRQ : SS41I_extract64<0x16, "pextrq">; @@ -6296,7 +6296,7 @@ imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>; } -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoBWI] in defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V; let Constraints = "$src1 = $dst" in defm PINSRB : SS41I_insert8<0x20, "pinsrb">; @@ -6322,7 +6322,7 @@ imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>; } -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoDQI] in defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V; let Constraints = "$src1 = $dst" in defm PINSRD : SS41I_insert32<0x22, "pinsrd">; @@ -6348,7 +6348,7 @@ imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>; } -let Predicates = [HasAVX] in +let Predicates = [HasAVX, NoDQI] in defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W; let Constraints = "$src1 = $dst" in defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W; Index: llvm/trunk/test/CodeGen/X86/avx-isa-check.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx-isa-check.ll +++ llvm/trunk/test/CodeGen/X86/avx-isa-check.ll @@ -267,3 +267,59 @@ %shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> ret <16 x i16> %shuffle } + +define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) { + %r1 = extractelement <2 x i64> %x, i32 0 + %r2 = extractelement <2 x i64> %x, i32 1 + store i64 %r2, i64* %dst, align 1 + ret i64 %r1 +} + +define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) { + %r1 = extractelement <4 x i32> %x, i32 1 + %r2 = extractelement <4 x i32> %x, i32 3 + store i32 %r2, i32* %dst, align 1 + ret i32 %r1 +} + +define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) { + %r1 = extractelement <8 x i16> %x, i32 1 + %r2 = extractelement <8 x i16> %x, i32 3 + store i16 %r2, i16* %dst, align 1 + ret i16 %r1 +} + +define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) { + %r1 = extractelement <16 x i8> %x, i32 1 + %r2 = extractelement <16 x i8> %x, i32 3 + store i8 %r2, i8* %dst, align 1 + ret i8 %r1 +} + +define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y 
, i64* %ptr) {
+  %val = load i64, i64* %ptr
+  %r1 = insertelement <2 x i64> %x, i64 %val, i32 1
+  %r2 = insertelement <2 x i64> %r1, i64 %y, i32 3
+  ret <2 x i64> %r2
+}
+
+define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) {
+  %val = load i32, i32* %ptr
+  %r1 = insertelement <4 x i32> %x, i32 %val, i32 1
+  %r2 = insertelement <4 x i32> %r1, i32 %y, i32 3
+  ret <4 x i32> %r2
+}
+
+define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) {
+  %val = load i16, i16* %ptr
+  %r1 = insertelement <8 x i16> %x, i16 %val, i32 1
+  %r2 = insertelement <8 x i16> %r1, i16 %y, i32 5
+  ret <8 x i16> %r2
+}
+
+define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) {
+  %val = load i8, i8* %ptr
+  %r1 = insertelement <16 x i8> %x, i8 %val, i32 3
+  %r2 = insertelement <16 x i8> %r1, i8 %y, i32 10
+  ret <16 x i8> %r2
+}
Index: llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll
+++ llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck --check-prefix=SKX %s
+
+
+define <8 x i16> @extract_subvector128_v32i16(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v32i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; SKX-NEXT:    retq
+  %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  ret <8 x i16> %r1
+}
+
+define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v32i16_first_element:
+; SKX:       ## BB#0:
+; SKX-NEXT:    retq
+  %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %r1
+}
+
+define <16 x i8> @extract_subvector128_v64i8(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v64i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; SKX-NEXT:    retq
+  %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
+  ret <16 x i8> %r1
+}
+
+define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v64i8_first_element:
+; SKX:       ## BB#0:
+; SKX-NEXT:    retq
+  %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %r1
+}
+
+
+define <16 x i16> @extract_subvector256_v32i16(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector256_v32i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; SKX-NEXT:    retq
+  %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  ret <16 x i16> %r1
+}
+
+define <32 x i8> @extract_subvector256_v64i8(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector256_v64i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; SKX-NEXT:    retq
+  %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  ret <32 x i8> %r1
+}
Index: llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
+++ llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
@@ -231,6 +231,380 @@
   ret i8 %x2
 }
 
+define i64 @extract_v8i64(<8 x i64> %x, i64* %dst) {
+; SKX-LABEL: extract_v8i64:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrq $1, %xmm0, %rax
+; SKX-NEXT:    vextracti64x2 $1, %zmm0, %xmm0
+; SKX-NEXT:    vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <8 x i64> %x, i32 1
+  %r2 = extractelement <8 x i64> %x, i32 3
+  store i64 %r2, i64* %dst, align 1
+  ret i64 %r1
+}
+
+define i64 @extract_v4i64(<4
x i64> %x, i64* %dst) { +; SKX-LABEL: extract_v4i64: +; SKX: ## BB#0: +; SKX-NEXT: vpextrq $1, %xmm0, %rax +; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0 +; SKX-NEXT: vpextrq $1, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <4 x i64> %x, i32 1 + %r2 = extractelement <4 x i64> %x, i32 3 + store i64 %r2, i64* %dst, align 1 + ret i64 %r1 +} + +define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) { +; SKX-LABEL: extract_v2i64: +; SKX: ## BB#0: +; SKX-NEXT: vmovq %xmm0, %rax +; SKX-NEXT: vpextrq $1, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <2 x i64> %x, i32 0 + %r2 = extractelement <2 x i64> %x, i32 1 + store i64 %r2, i64* %dst, align 1 + ret i64 %r1 +} + +define i32 @extract_v16i32(<16 x i32> %x, i32* %dst) { +; SKX-LABEL: extract_v16i32: +; SKX: ## BB#0: +; SKX-NEXT: vpextrd $1, %xmm0, %eax +; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0 +; SKX-NEXT: vpextrd $1, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <16 x i32> %x, i32 1 + %r2 = extractelement <16 x i32> %x, i32 5 + store i32 %r2, i32* %dst, align 1 + ret i32 %r1 +} + +define i32 @extract_v8i32(<8 x i32> %x, i32* %dst) { +; SKX-LABEL: extract_v8i32: +; SKX: ## BB#0: +; SKX-NEXT: vpextrd $1, %xmm0, %eax +; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0 +; SKX-NEXT: vpextrd $1, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <8 x i32> %x, i32 1 + %r2 = extractelement <8 x i32> %x, i32 5 + store i32 %r2, i32* %dst, align 1 + ret i32 %r1 +} + +define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) { +; SKX-LABEL: extract_v4i32: +; SKX: ## BB#0: +; SKX-NEXT: vpextrd $1, %xmm0, %eax +; SKX-NEXT: vpextrd $3, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <4 x i32> %x, i32 1 + %r2 = extractelement <4 x i32> %x, i32 3 + store i32 %r2, i32* %dst, align 1 + ret i32 %r1 +} + +define i16 @extract_v32i16(<32 x i16> %x, i16* %dst) { +; SKX-LABEL: extract_v32i16: +; SKX: ## BB#0: +; SKX-NEXT: vpextrw $1, %xmm0, %eax +; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0 +; SKX-NEXT: vpextrw $1, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <32 x i16> %x, i32 1 + %r2 = extractelement <32 x i16> %x, i32 9 + store i16 %r2, i16* %dst, align 1 + ret i16 %r1 +} + +define i16 @extract_v16i16(<16 x i16> %x, i16* %dst) { +; SKX-LABEL: extract_v16i16: +; SKX: ## BB#0: +; SKX-NEXT: vpextrw $1, %xmm0, %eax +; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0 +; SKX-NEXT: vpextrw $1, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <16 x i16> %x, i32 1 + %r2 = extractelement <16 x i16> %x, i32 9 + store i16 %r2, i16* %dst, align 1 + ret i16 %r1 +} + +define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) { +; SKX-LABEL: extract_v8i16: +; SKX: ## BB#0: +; SKX-NEXT: vpextrw $1, %xmm0, %eax +; SKX-NEXT: vpextrw $3, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <8 x i16> %x, i32 1 + %r2 = extractelement <8 x i16> %x, i32 3 + store i16 %r2, i16* %dst, align 1 + ret i16 %r1 +} + +define i8 @extract_v64i8(<64 x i8> %x, i8* %dst) { +; SKX-LABEL: extract_v64i8: +; SKX: ## BB#0: +; SKX-NEXT: vpextrb $1, %xmm0, %eax +; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0 +; SKX-NEXT: vpextrb $1, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <64 x i8> %x, i32 1 + %r2 = extractelement <64 x i8> %x, i32 17 + store i8 %r2, i8* %dst, align 1 + ret i8 %r1 +} + +define i8 @extract_v32i8(<32 x i8> %x, i8* %dst) { +; SKX-LABEL: extract_v32i8: +; SKX: ## BB#0: +; SKX-NEXT: vpextrb $1, %xmm0, %eax +; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0 +; SKX-NEXT: vpextrb $1, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <32 x i8> %x, i32 1 + %r2 = extractelement 
<32 x i8> %x, i32 17 + store i8 %r2, i8* %dst, align 1 + ret i8 %r1 +} + +define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) { +; SKX-LABEL: extract_v16i8: +; SKX: ## BB#0: +; SKX-NEXT: vpextrb $1, %xmm0, %eax +; SKX-NEXT: vpextrb $3, %xmm0, (%rdi) +; SKX-NEXT: retq + %r1 = extractelement <16 x i8> %x, i32 1 + %r2 = extractelement <16 x i8> %x, i32 3 + store i8 %r2, i8* %dst, align 1 + ret i8 %r1 +} + +define <8 x i64> @insert_v8i64(<8 x i64> %x, i64 %y , i64* %ptr) { +; SKX-LABEL: insert_v8i64: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1 +; SKX-NEXT: vinserti64x2 $0, %xmm1, %zmm0, %zmm0 +; SKX-NEXT: vextracti64x2 $1, %zmm0, %xmm1 +; SKX-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1 +; SKX-NEXT: vinserti64x2 $1, %xmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %val = load i64, i64* %ptr + %r1 = insertelement <8 x i64> %x, i64 %val, i32 1 + %r2 = insertelement <8 x i64> %r1, i64 %y, i32 3 + ret <8 x i64> %r2 +} + +define <4 x i64> @insert_v4i64(<4 x i64> %x, i64 %y , i64* %ptr) { +; SKX-LABEL: insert_v4i64: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1 +; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; SKX-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1 +; SKX-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %val = load i64, i64* %ptr + %r1 = insertelement <4 x i64> %x, i64 %val, i32 1 + %r2 = insertelement <4 x i64> %r1, i64 %y, i32 3 + ret <4 x i64> %r2 +} + +define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y , i64* %ptr) { +; SKX-LABEL: insert_v2i64: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm0 +; SKX-NEXT: vpinsrq $3, %rdi, %xmm0, %xmm0 +; SKX-NEXT: retq + %val = load i64, i64* %ptr + %r1 = insertelement <2 x i64> %x, i64 %val, i32 1 + %r2 = insertelement <2 x i64> %r1, i64 %y, i32 3 + ret <2 x i64> %r2 +} + +define <16 x i32> @insert_v16i32(<16 x i32> %x, i32 %y, i32* %ptr) { +; SKX-LABEL: insert_v16i32: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1 +; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0 +; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm1 +; SKX-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1 +; SKX-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %val = load i32, i32* %ptr + %r1 = insertelement <16 x i32> %x, i32 %val, i32 1 + %r2 = insertelement <16 x i32> %r1, i32 %y, i32 5 + ret <16 x i32> %r2 +} + +define <8 x i32> @insert_v8i32(<8 x i32> %x, i32 %y, i32* %ptr) { +; KNL-LABEL: insert_v8i32: +; KNL: ## BB#0: +; KNL-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1 +; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1 +; KNL-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1 +; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; KNL-NEXT: retq +; +; SKX-LABEL: insert_v8i32: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1 +; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; SKX-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1 +; SKX-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %val = load i32, i32* %ptr + %r1 = insertelement <8 x i32> %x, i32 %val, i32 1 + %r2 = insertelement <8 x i32> %r1, i32 %y, i32 5 + ret <8 x i32> %r2 +} + +define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) { +; KNL-LABEL: insert_v4i32: +; KNL: ## BB#0: +; KNL-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm0 +; KNL-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0 +; KNL-NEXT: retq +; +; SKX-LABEL: insert_v4i32: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm0 +; SKX-NEXT: vpinsrd 
$3, %edi, %xmm0, %xmm0 +; SKX-NEXT: retq + %val = load i32, i32* %ptr + %r1 = insertelement <4 x i32> %x, i32 %val, i32 1 + %r2 = insertelement <4 x i32> %r1, i32 %y, i32 3 + ret <4 x i32> %r2 +} + +define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) { +; KNL-LABEL: insert_v32i16: +; KNL: ## BB#0: +; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm2 +; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm2 +; KNL-NEXT: vpinsrw $1, %edi, %xmm2, %xmm2 +; KNL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; KNL-NEXT: retq +; +; SKX-LABEL: insert_v32i16: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1 +; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0 +; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm1 +; SKX-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1 +; SKX-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %val = load i16, i16* %ptr + %r1 = insertelement <32 x i16> %x, i16 %val, i32 1 + %r2 = insertelement <32 x i16> %r1, i16 %y, i32 9 + ret <32 x i16> %r2 +} + +define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, i16* %ptr) { +; KNL-LABEL: insert_v16i16: +; KNL: ## BB#0: +; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1 +; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1 +; KNL-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1 +; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; KNL-NEXT: retq +; +; SKX-LABEL: insert_v16i16: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1 +; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; SKX-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1 +; SKX-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %val = load i16, i16* %ptr + %r1 = insertelement <16 x i16> %x, i16 %val, i32 1 + %r2 = insertelement <16 x i16> %r1, i16 %y, i32 9 + ret <16 x i16> %r2 +} + +define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) { +; KNL-LABEL: insert_v8i16: +; KNL: ## BB#0: +; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm0 +; KNL-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0 +; KNL-NEXT: retq +; +; SKX-LABEL: insert_v8i16: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm0 +; SKX-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0 +; SKX-NEXT: retq + %val = load i16, i16* %ptr + %r1 = insertelement <8 x i16> %x, i16 %val, i32 1 + %r2 = insertelement <8 x i16> %r1, i16 %y, i32 5 + ret <8 x i16> %r2 +} + +define <64 x i8> @insert_v64i8(<64 x i8> %x, i8 %y, i8* %ptr) { +; KNL-LABEL: insert_v64i8: +; KNL: ## BB#0: +; KNL-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm2 +; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; KNL-NEXT: vextracti128 $1, %ymm1, %xmm2 +; KNL-NEXT: vpinsrb $2, %edi, %xmm2, %xmm2 +; KNL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; KNL-NEXT: retq +; +; SKX-LABEL: insert_v64i8: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1 +; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0 +; SKX-NEXT: vextracti32x4 $3, %zmm0, %xmm1 +; SKX-NEXT: vpinsrb $2, %edi, %xmm1, %xmm1 +; SKX-NEXT: vinserti32x4 $3, %xmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %val = load i8, i8* %ptr + %r1 = insertelement <64 x i8> %x, i8 %val, i32 1 + %r2 = insertelement <64 x i8> %r1, i8 %y, i32 50 + ret <64 x i8> %r2 +} + +define <32 x i8> @insert_v32i8(<32 x i8> %x, i8 %y, i8* %ptr) { +; SKX-LABEL: insert_v32i8: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1 +; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; SKX-NEXT: vpinsrb $1, 
%edi, %xmm1, %xmm1 +; SKX-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %val = load i8, i8* %ptr + %r1 = insertelement <32 x i8> %x, i8 %val, i32 1 + %r2 = insertelement <32 x i8> %r1, i8 %y, i32 17 + ret <32 x i8> %r2 +} + +define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) { +; KNL-LABEL: insert_v16i8: +; KNL: ## BB#0: +; KNL-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 +; KNL-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0 +; KNL-NEXT: retq +; +; SKX-LABEL: insert_v16i8: +; SKX: ## BB#0: +; SKX-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 +; SKX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0 +; SKX-NEXT: retq + %val = load i8, i8* %ptr + %r1 = insertelement <16 x i8> %x, i8 %val, i32 3 + %r2 = insertelement <16 x i8> %r1, i8 %y, i32 10 + ret <16 x i8> %r2 +} + define <8 x i64> @test_insert_128_v8i64(<8 x i64> %x, i64 %y) { ; KNL-LABEL: test_insert_128_v8i64: ; KNL: ## BB#0: Index: llvm/trunk/test/MC/X86/x86-64-avx512bw.s =================================================================== --- llvm/trunk/test/MC/X86/x86-64-avx512bw.s +++ llvm/trunk/test/MC/X86/x86-64-avx512bw.s @@ -4112,6 +4112,205 @@ // CHECK: encoding: [0x62,0x61,0x2d,0x40,0x69,0xb2,0xc0,0xdf,0xff,0xff] vpunpckhwd -8256(%rdx), %zmm26, %zmm30 +// CHECK: vpextrb $171, %xmm17, %eax +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0xc8,0xab] + vpextrb $171, %xmm17, %eax + +// CHECK: vpextrb $123, %xmm17, %eax +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0xc8,0x7b] + vpextrb $123, %xmm17, %eax + +// CHECK: vpextrb $123, %xmm17, %r8d +// CHECK: encoding: [0x62,0xc3,0x7d,0x08,0x14,0xc8,0x7b] + vpextrb $123, %xmm17,%r8d + +// CHECK: vpextrb $123, %xmm17, (%rcx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x09,0x7b] + vpextrb $123, %xmm17, (%rcx) + +// CHECK: vpextrb $123, %xmm17, 291(%rax,%r14,8) +// CHECK: encoding: [0x62,0xa3,0x7d,0x08,0x14,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b] + vpextrb $123, %xmm17, 291(%rax,%r14,8) + +// CHECK: vpextrb $123, %xmm17, 127(%rdx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x4a,0x7f,0x7b] + vpextrb $123, %xmm17, 127(%rdx) + +// CHECK: vpextrb $123, %xmm17, 128(%rdx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x8a,0x80,0x00,0x00,0x00,0x7b] + vpextrb $123, %xmm17, 128(%rdx) + +// CHECK: vpextrb $123, %xmm17, -128(%rdx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x4a,0x80,0x7b] + vpextrb $123, %xmm17, -128(%rdx) + +// CHECK: vpextrb $123, %xmm17, -129(%rdx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x8a,0x7f,0xff,0xff,0xff,0x7b] + vpextrb $123, %xmm17, -129(%rdx) +// CHECK: vpinsrb $171, %eax, %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0xc8,0xab] + vpinsrb $171,%eax, %xmm25, %xmm25 + +// CHECK: vpinsrb $123, %eax, %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0xc8,0x7b] + vpinsrb $123,%eax, %xmm25, %xmm25 + +// CHECK: vpinsrb $123, %ebp, %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0xcd,0x7b] + vpinsrb $123,%ebp, %xmm25, %xmm25 + +// CHECK: vpinsrb $123, %r13d, %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x43,0x35,0x00,0x20,0xcd,0x7b] + vpinsrb $123,%r13d, %xmm25, %xmm25 + +// CHECK: vpinsrb $123, (%rcx), %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x09,0x7b] + vpinsrb $123, (%rcx), %xmm25, %xmm25 + +// CHECK: vpinsrb $123, 291(%rax,%r14,8), %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x23,0x35,0x00,0x20,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b] + vpinsrb $123, 291(%rax,%r14,8), %xmm25, %xmm25 + +// CHECK: vpinsrb $123, 127(%rdx), %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x4a,0x7f,0x7b] + vpinsrb $123, 
127(%rdx), %xmm25, %xmm25 + +// CHECK: vpinsrb $123, 128(%rdx), %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x8a,0x80,0x00,0x00,0x00,0x7b] + vpinsrb $123, 128(%rdx), %xmm25, %xmm25 + +// CHECK: vpinsrb $123, -128(%rdx), %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x4a,0x80,0x7b] + vpinsrb $123, -128(%rdx), %xmm25, %xmm25 + +// CHECK: vpinsrb $123, -129(%rdx), %xmm25, %xmm25 +// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x8a,0x7f,0xff,0xff,0xff,0x7b] + vpinsrb $123, -129(%rdx), %xmm25, %xmm25 + +// CHECK: vpinsrw $171, %eax, %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd0,0xab] + vpinsrw $171,%eax, %xmm25, %xmm18 + +// CHECK: vpinsrw $123, %eax, %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd0,0x7b] + vpinsrw $123,%eax, %xmm25, %xmm18 + +// CHECK: vpinsrw $123, %ebp, %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd5,0x7b] + vpinsrw $123,%ebp, %xmm25, %xmm18 + +// CHECK: vpinsrw $123, %r13d, %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xc1,0x35,0x00,0xc4,0xd5,0x7b] + vpinsrw $123,%r13d, %xmm25, %xmm18 + +// CHECK: vpinsrw $123, (%rcx), %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x11,0x7b] + vpinsrw $123, (%rcx), %xmm25, %xmm18 + +// CHECK: vpinsrw $123, 291(%rax,%r14,8), %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xa1,0x35,0x00,0xc4,0x94,0xf0,0x23,0x01,0x00,0x00,0x7b] + vpinsrw $123, 291(%rax,%r14,8), %xmm25, %xmm18 + +// CHECK: vpinsrw $123, 254(%rdx), %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x52,0x7f,0x7b] + vpinsrw $123, 254(%rdx), %xmm25, %xmm18 + +// CHECK: vpinsrw $123, 256(%rdx), %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x92,0x00,0x01,0x00,0x00,0x7b] + vpinsrw $123, 256(%rdx), %xmm25, %xmm18 + +// CHECK: vpinsrw $123, -256(%rdx), %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x52,0x80,0x7b] + vpinsrw $123, -256(%rdx), %xmm25, %xmm18 + +// CHECK: vpinsrw $123, -258(%rdx), %xmm25, %xmm18 +// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x92,0xfe,0xfe,0xff,0xff,0x7b] + vpinsrw $123, -258(%rdx), %xmm25, %xmm18 + +// CHECK: vpextrw $123, %xmm28, (%rcx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0x21,0x7b] + vpextrw $123, %xmm28, (%rcx) + +// CHECK: vpextrw $123, %xmm28, 291(%rax,%r14,8) +// CHECK: encoding: [0x62,0x23,0x7d,0x08,0x15,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b] + vpextrw $123, %xmm28, 291(%rax,%r14,8) + +// CHECK: vpextrw $123, %xmm28, 254(%rdx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0x62,0x7f,0x7b] + vpextrw $123, %xmm28, 254(%rdx) + +// CHECK: vpextrw $123, %xmm28, 256(%rdx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0xa2,0x00,0x01,0x00,0x00,0x7b] + vpextrw $123, %xmm28, 256(%rdx) + +// CHECK: vpextrw $123, %xmm28, -256(%rdx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0x62,0x80,0x7b] + vpextrw $123, %xmm28, -256(%rdx) + +// CHECK: vpextrw $123, %xmm28, -258(%rdx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0xa2,0xfe,0xfe,0xff,0xff,0x7b] + vpextrw $123, %xmm28, -258(%rdx) + +// CHECK: vpextrw $171, %xmm30, %eax +// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc6,0xab] + vpextrw $171, %xmm30,%rax + +// CHECK: vpextrw $123, %xmm30, %eax +// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc6,0x7b] + vpextrw $123, %xmm30,%rax + +// CHECK: vpextrw $123, %xmm30, %r8d +// CHECK: encoding: [0x62,0x11,0x7d,0x08,0xc5,0xc6,0x7b] + vpextrw $123, %xmm30,%r8 + +// CHECK: vpextrw $171, %xmm28, %eax +// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc4,0xab] + vpextrw $0xab, %xmm28, %eax + +// CHECK: vpextrw $123, 
%xmm28, %eax +// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc4,0x7b] + vpextrw $0x7b, %xmm28, %eax + +// CHECK: vpextrw $123, %xmm28, %r8d +// CHECK: encoding: [0x62,0x11,0x7d,0x08,0xc5,0xc4,0x7b] + vpextrw $0x7b, %xmm28, %r8d + +// CHECK: vpextrw $171, %xmm28, %eax +// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc4,0xab] + vpextrw $0xab, %xmm28, %eax + +// CHECK: vpextrw $123, %xmm28, %eax +// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc4,0x7b] + vpextrw $0x7b, %xmm28, %eax + +// CHECK: vpextrw $123, %xmm28, %r8d +// CHECK: encoding: [0x62,0x11,0x7d,0x08,0xc5,0xc4,0x7b] + vpextrw $0x7b, %xmm28, %r8d + +// CHECK: vpextrw $171, %xmm20, %eax +// CHECK: encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc4,0xab] + vpextrw $0xab, %xmm20, %eax + +// CHECK: vpextrw $123, %xmm20, %eax +// CHECK: encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc4,0x7b] + vpextrw $0x7b, %xmm20, %eax + +// CHECK: vpextrw $123, %xmm20, %r8d +// CHECK: encoding: [0x62,0x31,0x7d,0x08,0xc5,0xc4,0x7b] + vpextrw $0x7b, %xmm20, %r8d + +// CHECK: vpextrw $171, %xmm19, %eax +// CHECK: encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc3,0xab] + vpextrw $0xab, %xmm19, %eax + +// CHECK: vpextrw $123, %xmm19, %eax +// CHECK: encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc3,0x7b] + vpextrw $0x7b, %xmm19, %eax + +// CHECK: vpextrw $123, %xmm19, %r8d +// CHECK: encoding: [0x62,0x31,0x7d,0x08,0xc5,0xc3,0x7b] + vpextrw $0x7b, %xmm19, %r8d + // CHECK: kunpckdq %k4, %k6, %k4 // CHECK: encoding: [0xc4,0xe1,0xcc,0x4b,0xe4] kunpckdq %k4, %k6, %k4 Index: llvm/trunk/test/MC/X86/x86-64-avx512dq.s =================================================================== --- llvm/trunk/test/MC/X86/x86-64-avx512dq.s +++ llvm/trunk/test/MC/X86/x86-64-avx512dq.s @@ -2443,6 +2443,310 @@ // CHECK: encoding: [0x62,0xa1,0xff,0xca,0x7a,0xd5] vcvtuqq2ps %zmm21, %ymm18 {%k2} {z} +// CHECK: vpextrd $171, %xmm28, %eax +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xe0,0xab] + vpextrd $0xab, %xmm28, %eax + +// CHECK: vpextrd $123, %xmm28, %eax +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xe0,0x7b] + vpextrd $0x7b, %xmm28, %eax + +// CHECK: vpextrd $123, %xmm28, %ebp +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xe5,0x7b] + vpextrd $0x7b, %xmm28, %ebp + +// CHECK: vpextrd $123, %xmm28, %r13d +// CHECK: encoding: [0x62,0x43,0x7d,0x08,0x16,0xe5,0x7b] + vpextrd $0x7b, %xmm28, %r13d + +// CHECK: vpextrd $123, %xmm28, (%rcx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0x21,0x7b] + vpextrd $0x7b, %xmm28, (%rcx) + +// CHECK: vpextrd $123, %xmm28, 291(%rax,%r14,8) +// CHECK: encoding: [0x62,0x23,0x7d,0x08,0x16,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b] + vpextrd $0x7b, %xmm28, 291(%rax,%r14,8) + +// CHECK: vpextrd $123, %xmm28, 508(%rdx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0x62,0x7f,0x7b] + vpextrd $0x7b, %xmm28, 508(%rdx) + +// CHECK: vpextrd $123, %xmm28, 512(%rdx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xa2,0x00,0x02,0x00,0x00,0x7b] + vpextrd $0x7b, %xmm28, 512(%rdx) + +// CHECK: vpextrd $123, %xmm28, -512(%rdx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0x62,0x80,0x7b] + vpextrd $0x7b, %xmm28, -512(%rdx) + +// CHECK: vpextrd $123, %xmm28, -516(%rdx) +// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xa2,0xfc,0xfd,0xff,0xff,0x7b] + vpextrd $0x7b, %xmm28, -516(%rdx) + +// CHECK: vpextrd $171, %xmm20, %eax +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xe0,0xab] + vpextrd $0xab, %xmm20, %eax + +// CHECK: vpextrd $123, %xmm20, %eax +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xe0,0x7b] + vpextrd $0x7b, %xmm20, %eax + +// CHECK: vpextrd $123, %xmm20, %ebp +// CHECK: encoding: 
[0x62,0xe3,0x7d,0x08,0x16,0xe5,0x7b] + vpextrd $0x7b, %xmm20, %ebp + +// CHECK: vpextrd $123, %xmm20, %r13d +// CHECK: encoding: [0x62,0xc3,0x7d,0x08,0x16,0xe5,0x7b] + vpextrd $0x7b, %xmm20, %r13d + +// CHECK: vpextrd $123, %xmm20, (%rcx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0x21,0x7b] + vpextrd $0x7b, %xmm20, (%rcx) + +// CHECK: vpextrd $123, %xmm20, 4660(%rax,%r14,8) +// CHECK: encoding: [0x62,0xa3,0x7d,0x08,0x16,0xa4,0xf0,0x34,0x12,0x00,0x00,0x7b] + vpextrd $0x7b, %xmm20, 4660(%rax,%r14,8) + +// CHECK: vpextrd $123, %xmm20, 508(%rdx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0x62,0x7f,0x7b] + vpextrd $0x7b, %xmm20, 508(%rdx) + +// CHECK: vpextrd $123, %xmm20, 512(%rdx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xa2,0x00,0x02,0x00,0x00,0x7b] + vpextrd $0x7b, %xmm20, 512(%rdx) + +// CHECK: vpextrd $123, %xmm20, -512(%rdx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0x62,0x80,0x7b] + vpextrd $0x7b, %xmm20, -512(%rdx) + +// CHECK: vpextrd $123, %xmm20, -516(%rdx) +// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xa2,0xfc,0xfd,0xff,0xff,0x7b] + vpextrd $0x7b, %xmm20, -516(%rdx) + +// CHECK: vpextrq $171, %xmm24, %rax +// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0xc0,0xab] + vpextrq $0xab, %xmm24, %rax + +// CHECK: vpextrq $123, %xmm24, %rax +// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0xc0,0x7b] + vpextrq $0x7b, %xmm24, %rax + +// CHECK: vpextrq $123, %xmm24, %r8 +// CHECK: encoding: [0x62,0x43,0xfd,0x08,0x16,0xc0,0x7b] + vpextrq $0x7b, %xmm24, %r8 + +// CHECK: vpextrq $123, %xmm24, (%rcx) +// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x01,0x7b] + vpextrq $0x7b, %xmm24, (%rcx) + +// CHECK: vpextrq $123, %xmm24, 291(%rax,%r14,8) +// CHECK: encoding: [0x62,0x23,0xfd,0x08,0x16,0x84,0xf0,0x23,0x01,0x00,0x00,0x7b] + vpextrq $0x7b, %xmm24, 291(%rax,%r14,8) + +// CHECK: vpextrq $123, %xmm24, 1016(%rdx) +// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x42,0x7f,0x7b] + vpextrq $0x7b, %xmm24, 1016(%rdx) + +// CHECK: vpextrq $123, %xmm24, 1024(%rdx) +// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x82,0x00,0x04,0x00,0x00,0x7b] + vpextrq $0x7b, %xmm24, 1024(%rdx) + +// CHECK: vpextrq $123, %xmm24, -1024(%rdx) +// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x42,0x80,0x7b] + vpextrq $0x7b, %xmm24, -1024(%rdx) + +// CHECK: vpextrq $123, %xmm24, -1032(%rdx) +// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x82,0xf8,0xfb,0xff,0xff,0x7b] + vpextrq $0x7b, %xmm24, -1032(%rdx) + +// CHECK: vpextrq $171, %xmm20, %rax +// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0xe0,0xab] + vpextrq $0xab, %xmm20, %rax + +// CHECK: vpextrq $123, %xmm20, %rax +// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0xe0,0x7b] + vpextrq $0x7b, %xmm20, %rax + +// CHECK: vpextrq $123, %xmm20, %r8 +// CHECK: encoding: [0x62,0xc3,0xfd,0x08,0x16,0xe0,0x7b] + vpextrq $0x7b, %xmm20, %r8 + +// CHECK: vpextrq $123, %xmm20, (%rcx) +// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0x21,0x7b] + vpextrq $0x7b, %xmm20, (%rcx) + +// CHECK: vpextrq $123, %xmm20, 4660(%rax,%r14,8) +// CHECK: encoding: [0x62,0xa3,0xfd,0x08,0x16,0xa4,0xf0,0x34,0x12,0x00,0x00,0x7b] + vpextrq $0x7b, %xmm20, 4660(%rax,%r14,8) + +// CHECK: vpextrq $123, %xmm20, 1016(%rdx) +// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0x62,0x7f,0x7b] + vpextrq $0x7b, %xmm20, 1016(%rdx) + +// CHECK: vpextrq $123, %xmm20, 1024(%rdx) +// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0xa2,0x00,0x04,0x00,0x00,0x7b] + vpextrq $0x7b, %xmm20, 1024(%rdx) + +// CHECK: vpextrq $123, %xmm20, -1024(%rdx) +// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0x62,0x80,0x7b] + vpextrq $0x7b, %xmm20, 
-1024(%rdx) + +// CHECK: vpextrq $123, %xmm20, -1032(%rdx) +// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0xa2,0xf8,0xfb,0xff,0xff,0x7b] + vpextrq $0x7b, %xmm20, -1032(%rdx) + +// CHECK: vpinsrd $171, %eax, %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xf8,0xab] + vpinsrd $0xab,%eax, %xmm25, %xmm23 + +// CHECK: vpinsrd $123, %eax, %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xf8,0x7b] + vpinsrd $0x7b,%eax, %xmm25, %xmm23 + +// CHECK: vpinsrd $123, %ebp, %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xfd,0x7b] + vpinsrd $0x7b,%ebp, %xmm25, %xmm23 + +// CHECK: vpinsrd $123, %r13d, %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xc3,0x35,0x00,0x22,0xfd,0x7b] + vpinsrd $0x7b,%r13d, %xmm25, %xmm23 + +// CHECK: vpinsrd $123, (%rcx), %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0x39,0x7b] + vpinsrd $0x7b,(%rcx), %xmm25, %xmm23 + +// CHECK: vpinsrd $123, 291(%rax,%r14,8), %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xa3,0x35,0x00,0x22,0xbc,0xf0,0x23,0x01,0x00,0x00,0x7b] + vpinsrd $0x7b,291(%rax,%r14,8), %xmm25, %xmm23 + +// CHECK: vpinsrd $123, 508(%rdx), %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0x7a,0x7f,0x7b] + vpinsrd $0x7b,508(%rdx), %xmm25, %xmm23 + +// CHECK: vpinsrd $123, 512(%rdx), %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xba,0x00,0x02,0x00,0x00,0x7b] + vpinsrd $0x7b,512(%rdx), %xmm25, %xmm23 + +// CHECK: vpinsrd $123, -512(%rdx), %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0x7a,0x80,0x7b] + vpinsrd $0x7b,-512(%rdx), %xmm25, %xmm23 + +// CHECK: vpinsrd $123, -516(%rdx), %xmm25, %xmm23 +// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xba,0xfc,0xfd,0xff,0xff,0x7b] + vpinsrd $0x7b,-516(%rdx), %xmm25, %xmm23 + +// CHECK: vpinsrd $171, %eax, %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xf0,0xab] + vpinsrd $0xab,%eax, %xmm29, %xmm22 + +// CHECK: vpinsrd $123, %eax, %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xf0,0x7b] + vpinsrd $0x7b,%eax, %xmm29, %xmm22 + +// CHECK: vpinsrd $123, %ebp, %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xf5,0x7b] + vpinsrd $0x7b,%ebp, %xmm29, %xmm22 + +// CHECK: vpinsrd $123, %r13d, %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xc3,0x15,0x00,0x22,0xf5,0x7b] + vpinsrd $0x7b,%r13d, %xmm29, %xmm22 + +// CHECK: vpinsrd $123, (%rcx), %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0x31,0x7b] + vpinsrd $0x7b,(%rcx), %xmm29, %xmm22 + +// CHECK: vpinsrd $123, 4660(%rax,%r14,8), %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xa3,0x15,0x00,0x22,0xb4,0xf0,0x34,0x12,0x00,0x00,0x7b] + vpinsrd $0x7b,4660(%rax,%r14,8), %xmm29, %xmm22 + +// CHECK: vpinsrd $123, 508(%rdx), %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0x72,0x7f,0x7b] + vpinsrd $0x7b,508(%rdx), %xmm29, %xmm22 + +// CHECK: vpinsrd $123, 512(%rdx), %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xb2,0x00,0x02,0x00,0x00,0x7b] + vpinsrd $0x7b,512(%rdx), %xmm29, %xmm22 + +// CHECK: vpinsrd $123, -512(%rdx), %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0x72,0x80,0x7b] + vpinsrd $0x7b,-512(%rdx), %xmm29, %xmm22 + +// CHECK: vpinsrd $123, -516(%rdx), %xmm29, %xmm22 +// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xb2,0xfc,0xfd,0xff,0xff,0x7b] + vpinsrd $0x7b,-516(%rdx), %xmm29, %xmm22 + +// CHECK: vpinsrq $171, %rax, %xmm20, %xmm22 +// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0xf0,0xab] + vpinsrq $0xab,%rax, %xmm20, %xmm22 + +// CHECK: vpinsrq $123, %rax, %xmm20, %xmm22 +// CHECK: encoding: 
[0x62,0xe3,0xdd,0x00,0x22,0xf0,0x7b] + vpinsrq $0x7b,%rax, %xmm20, %xmm22 + +// CHECK: vpinsrq $123, %r8, %xmm20, %xmm22 +// CHECK: encoding: [0x62,0xc3,0xdd,0x00,0x22,0xf0,0x7b] + vpinsrq $0x7b,%r8, %xmm20, %xmm22 + +// CHECK: vpinsrq $123, (%rcx), %xmm20, %xmm22 +// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0x31,0x7b] + vpinsrq $0x7b,(%rcx), %xmm20, %xmm22 + +// CHECK: vpinsrq $123, 291(%rax,%r14,8), %xmm20, %xmm22 +// CHECK: encoding: [0x62,0xa3,0xdd,0x00,0x22,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b] + vpinsrq $0x7b,291(%rax,%r14,8), %xmm20, %xmm22 + +// CHECK: vpinsrq $123, 1016(%rdx), %xmm20, %xmm22 +// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0x72,0x7f,0x7b] + vpinsrq $0x7b,1016(%rdx), %xmm20, %xmm22 + +// CHECK: vpinsrq $123, 1024(%rdx), %xmm20, %xmm22 +// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0xb2,0x00,0x04,0x00,0x00,0x7b] + vpinsrq $0x7b,1024(%rdx), %xmm20, %xmm22 + +// CHECK: vpinsrq $123, -1024(%rdx), %xmm20, %xmm22 +// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0x72,0x80,0x7b] + vpinsrq $0x7b,-1024(%rdx), %xmm20, %xmm22 + +// CHECK: vpinsrq $123, -1032(%rdx), %xmm20, %xmm22 +// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0xb2,0xf8,0xfb,0xff,0xff,0x7b] + vpinsrq $0x7b,-1032(%rdx), %xmm20, %xmm22 + +// CHECK: vpinsrq $171, %rax, %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0xc8,0xab] + vpinsrq $0xab,%rax, %xmm19, %xmm25 + +// CHECK: vpinsrq $123, %rax, %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0xc8,0x7b] + vpinsrq $0x7b,%rax, %xmm19, %xmm25 + +// CHECK: vpinsrq $123, %r8, %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x43,0xe5,0x00,0x22,0xc8,0x7b] + vpinsrq $0x7b,%r8, %xmm19, %xmm25 + +// CHECK: vpinsrq $123, (%rcx), %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x09,0x7b] + vpinsrq $0x7b,(%rcx), %xmm19, %xmm25 + +// CHECK: vpinsrq $123, 4660(%rax,%r14,8), %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x23,0xe5,0x00,0x22,0x8c,0xf0,0x34,0x12,0x00,0x00,0x7b] + vpinsrq $0x7b,4660(%rax,%r14,8), %xmm19, %xmm25 + +// CHECK: vpinsrq $123, 1016(%rdx), %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x4a,0x7f,0x7b] + vpinsrq $0x7b,1016(%rdx), %xmm19, %xmm25 + +// CHECK: vpinsrq $123, 1024(%rdx), %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x8a,0x00,0x04,0x00,0x00,0x7b] + vpinsrq $0x7b,1024(%rdx), %xmm19, %xmm25 + +// CHECK: vpinsrq $123, -1024(%rdx), %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x4a,0x80,0x7b] + vpinsrq $0x7b,-1024(%rdx), %xmm19, %xmm25 + +// CHECK: vpinsrq $123, -1032(%rdx), %xmm19, %xmm25 +// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x8a,0xf8,0xfb,0xff,0xff,0x7b] + vpinsrq $0x7b,-1032(%rdx), %xmm19, %xmm25 + // CHECK: vinsertf32x8 $171, %ymm24, %zmm17, %zmm29 // CHECK: encoding: [0x62,0x03,0x75,0x40,0x1a,0xe8,0xab] vinsertf32x8 $0xab, %ymm24, %zmm17, %zmm29
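// NOTE: illustrative invocation (the exact cpu/attr flags vary per test file
// and are assumed here from similar MC tests): encodings like the ones above
// are produced with llvm-mc's --show-encoding, along the lines of these
// tests' RUN lines, e.g.
//   llvm-mc -triple x86_64-unknown-unknown -mcpu=skx --show-encoding input.s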