Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1689,6 +1689,9 @@
   for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
     setOperationAction(ISD::VSELECT, VT, Legal);
+    setOperationAction(ISD::SRL, VT, Custom);
+    setOperationAction(ISD::SHL, VT, Custom);
+    setOperationAction(ISD::SRA, VT, Custom);
     setOperationAction(ISD::AND, VT, Promote);
     AddPromotedToType (ISD::AND, VT, MVT::v8i64);
@@ -18291,9 +18294,9 @@
   if (!Subtarget->hasInt256() || VT.getScalarSizeInBits() < 16)
     return false;
-  // vXi16 supported only on AVX-512, BWI
-  if (VT.getScalarSizeInBits() == 16 && !Subtarget->hasBWI())
-    return false;
+  // vXi16 is supported only with AVX-512BW; 128/256-bit vectors also need VLX.
+  if (VT.getScalarSizeInBits() == 16)
+    return (Subtarget->hasBWI() && (VT.is512BitVector() || Subtarget->hasVLX()));
   if (VT.is512BitVector() || Subtarget->hasVLX())
     return true;
@@ -18358,7 +18361,9 @@
         Op.getOpcode() == ISD::SRA && !Subtarget->hasXOP())
       return ArithmeticShiftRight64(ShiftAmt);
-    if (VT == MVT::v16i8 || (Subtarget->hasInt256() && VT == MVT::v32i8)) {
+    if (VT == MVT::v16i8 ||
+        (Subtarget->hasInt256() && VT == MVT::v32i8) ||
+        (Subtarget->hasBWI() && VT == MVT::v64i8)) {
       unsigned NumElts = VT.getVectorNumElements();
       MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
@@ -18382,10 +18387,8 @@
                                R, ShiftAmt, DAG);
       SHL = DAG.getBitcast(VT, SHL);
       // Zero out the rightmost bits.
-      SmallVector<SDValue, 32> V(
-          NumElts, DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, MVT::i8));
       return DAG.getNode(ISD::AND, dl, VT, SHL,
-                         DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
+                         DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
     }
     if (Op.getOpcode() == ISD::SRL) {
       // Make a large shift.
@@ -18393,18 +18396,14 @@
                                R, ShiftAmt, DAG);
       SRL = DAG.getBitcast(VT, SRL);
       // Zero out the leftmost bits.
-      SmallVector<SDValue, 32> V(
-          NumElts, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, MVT::i8));
       return DAG.getNode(ISD::AND, dl, VT, SRL,
-                         DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
+                         DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
     }
     if (Op.getOpcode() == ISD::SRA) {
       // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
       SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
-      SmallVector<SDValue, 32> V(NumElts,
-                                 DAG.getConstant(128 >> ShiftAmt, dl,
-                                                 MVT::i8));
-      SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
+
+      SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
       Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
       return Res;
Index: lib/Target/X86/X86InstrSSE.td
===================================================================
--- lib/Target/X86/X86InstrSSE.td
+++ lib/Target/X86/X86InstrSSE.td
@@ -4098,9 +4098,6 @@
 //===---------------------------------------------------------------------===//
 let Predicates = [HasAVX, NoVLX] in {
-defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
-                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
-                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
                             VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
@@ -4108,9 +4105,6 @@
                             VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
-defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
-                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
-                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
                             VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
@@ -4118,13 +4112,23 @@
                             VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
-defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
-                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
-                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
                             VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
-} // Predicates = [HasAVX]
+} // Predicates = [HasAVX, NoVLX]
+
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
+                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
+                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
+                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
+
 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] ,
                                     Predicates = [HasAVX, NoVLX_Or_NoBWI]in {
@@ -4145,9 +4149,6 @@
 } // Predicates = [HasAVX, NoVLX_Or_NoBWI]
 let Predicates = [HasAVX2, NoVLX] in {
-defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
-                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
-                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
                              VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
@@ -4155,9 +4156,6 @@
                              VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
-defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
-                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
-                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
                              VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
@@ -4165,13 +4163,22 @@
                              VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
-defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
-                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
-                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
                              VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
-}// Predicates = [HasAVX2]
+}// Predicates = [HasAVX2, NoVLX]
+
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
+                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
+                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
+                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+}// Predicates = [HasAVX2, NoVLX_Or_NoBWI]
 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 ,
                                     Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
Index: test/CodeGen/X86/avx-isa-check.ll
===================================================================
--- test/CodeGen/X86/avx-isa-check.ll
+++ test/CodeGen/X86/avx-isa-check.ll
@@ -429,3 +429,62 @@
   ret <4 x double> %shuffle
 }
+define <16 x i16> @ashr_v16i16(<16 x i16> %a, <16 x i16> %b) {
+  %shift = ashr <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @lshr_v16i16(<16 x i16> %a, <16 x i16> %b) {
+  %shift = lshr <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @shl_v16i16(<16 x i16> %a, <16 x i16> %b) {
+  %shift = shl <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @ashr_const_v16i16(<16 x i16> %a) {
+  %shift = ashr <16 x i16> %a,
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @lshr_const_v16i16(<16 x i16> %a) {
+  %shift = lshr <16 x i16> %a,
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @shl_const_v16i16(<16 x i16> %a) {
+  %shift = shl <16 x i16> %a,
+  ret <16 x i16> %shift
+}
+
+define <8 x i16> @ashr_v8i16(<8 x i16> %a, <8 x i16> %b) {
+  %shift = ashr <8 x i16> %a, %b
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @lshr_v8i16(<8 x i16> %a, <8 x i16> %b) {
+  %shift = lshr <8 x i16> %a, %b
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @shl_v8i16(<8 x i16> %a, <8 x i16> %b) {
+  %shift = shl <8 x i16> %a, %b
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @ashr_const_v8i16(<8 x i16> %a) {
+  %shift = ashr <8 x i16> %a,
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @lshr_const_v8i16(<8 x i16> %a) {
+  %shift = lshr <8 x i16> %a,
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @shl_const_v8i16(<8 x i16> %a) {
+  %shift = shl <8 x i16> %a,
+  ret <8 x i16> %shift
+}
Index: test/CodeGen/X86/vector-shift-ashr-512.ll
===================================================================
--- test/CodeGen/X86/vector-shift-ashr-512.ll
+++ test/CodeGen/X86/vector-shift-ashr-512.ll
@@ -1,7 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; TODO: Add AVX512BW shift support
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
-
+; RUN: llc < %s -mtriple=x86_64-apple-darwin
-mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW ; ; Variable Shifts ; @@ -25,84 +24,89 @@ } define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind { -; ALL-LABEL: var_shift_v32i16: -; ALL: ## BB#0: -; ALL-NEXT: vpxor %ymm4, %ymm4, %ymm4 -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15] -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] -; ALL-NEXT: vpsravd %ymm5, %ymm6, %ymm5 -; ALL-NEXT: vpsrld $16, %ymm5, %ymm5 -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11] -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] -; ALL-NEXT: vpsravd %ymm2, %ymm0, %ymm0 -; ALL-NEXT: vpsrld $16, %ymm0, %ymm0 -; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15] -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] -; ALL-NEXT: vpsravd %ymm2, %ymm5, %ymm2 -; ALL-NEXT: vpsrld $16, %ymm2, %ymm2 -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11] -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] -; ALL-NEXT: vpsravd %ymm3, %ymm1, %ymm1 -; ALL-NEXT: vpsrld $16, %ymm1, %ymm1 -; ALL-NEXT: vpackusdw %ymm2, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: var_shift_v32i16: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15] +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] +; AVX512DQ-NEXT: vpsravd %ymm5, %ymm6, %ymm5 +; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11] +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] +; AVX512DQ-NEXT: vpsravd %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15] +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] +; AVX512DQ-NEXT: vpsravd %ymm2, %ymm5, %ymm2 +; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11] +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] +; AVX512DQ-NEXT: vpsravd %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: var_shift_v32i16: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq 
%shift = ashr <32 x i16> %a, %b ret <32 x i16> %shift } define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { -; ALL-LABEL: var_shift_v64i8: -; ALL: ## BB#0: -; ALL-NEXT: vpsllw $5, %ymm2, %ymm2 -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31] -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] -; ALL-NEXT: vpsraw $4, %ymm5, %ymm6 -; ALL-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5 -; ALL-NEXT: vpsraw $2, %ymm5, %ymm6 -; ALL-NEXT: vpaddw %ymm4, %ymm4, %ymm4 -; ALL-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5 -; ALL-NEXT: vpsraw $1, %ymm5, %ymm6 -; ALL-NEXT: vpaddw %ymm4, %ymm4, %ymm4 -; ALL-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm4 -; ALL-NEXT: vpsrlw $8, %ymm4, %ymm4 -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23] -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] -; ALL-NEXT: vpsraw $4, %ymm0, %ymm5 -; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsraw $2, %ymm0, %ymm5 -; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsraw $1, %ymm0, %ymm5 -; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $8, %ymm0, %ymm0 -; ALL-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 -; ALL-NEXT: vpsllw $5, %ymm3, %ymm2 -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31] -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] -; ALL-NEXT: vpsraw $4, %ymm4, %ymm5 -; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsraw $2, %ymm4, %ymm5 -; ALL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsraw $1, %ymm4, %ymm5 -; ALL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 -; ALL-NEXT: vpsrlw $8, %ymm3, %ymm3 -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23] -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm1 = 
ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] -; ALL-NEXT: vpsraw $4, %ymm1, %ymm4 -; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 -; ALL-NEXT: vpsraw $2, %ymm1, %ymm4 -; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 -; ALL-NEXT: vpsraw $1, %ymm1, %ymm4 -; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 -; ALL-NEXT: vpsrlw $8, %ymm1, %ymm1 -; ALL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: var_shift_v64i8: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31] +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] +; AVX512DQ-NEXT: vpsraw $4, %ymm5, %ymm6 +; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpsraw $2, %ymm5, %ymm6 +; AVX512DQ-NEXT: vpaddw %ymm4, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpsraw $1, %ymm5, %ymm6 +; AVX512DQ-NEXT: vpaddw %ymm4, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm4 +; AVX512DQ-NEXT: vpsrlw $8, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23] +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] +; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsllw $5, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31] +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] +; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, 
%ymm4, %ymm4 +; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23] +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] +; AVX512DQ-NEXT: vpsraw $4, %ymm1, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsraw $2, %ymm1, %ymm4 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsraw $1, %ymm1, %ymm4 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq %shift = ashr <64 x i8> %a, %b ret <64 x i8> %shift } @@ -134,65 +138,73 @@ } define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind { -; ALL-LABEL: splatvar_shift_v32i16: -; ALL: ## BB#0: -; ALL-NEXT: vmovd %xmm2, %eax -; ALL-NEXT: movzwl %ax, %eax -; ALL-NEXT: vmovd %eax, %xmm2 -; ALL-NEXT: vpsraw %xmm2, %ymm0, %ymm0 -; ALL-NEXT: vpsraw %xmm2, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: splatvar_shift_v32i16: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovd %xmm2, %eax +; AVX512DQ-NEXT: movzwl %ax, %eax +; AVX512DQ-NEXT: vmovd %eax, %xmm2 +; AVX512DQ-NEXT: vpsraw %xmm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsraw %xmm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: splatvar_shift_v32i16: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vmovd %xmm1, %eax +; AVX512BW-NEXT: movzwl %ax, %eax +; AVX512BW-NEXT: vmovd %eax, %xmm1 +; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer %shift = ashr <32 x i16> %a, %splat ret <32 x i16> %shift } define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { -; ALL-LABEL: splatvar_shift_v64i8: -; ALL: ## BB#0: -; ALL-NEXT: vpbroadcastb %xmm2, %ymm2 -; ALL-NEXT: vpsllw $5, %ymm2, %ymm2 -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31] -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] -; ALL-NEXT: vpsraw $4, %ymm4, %ymm5 -; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsraw $2, %ymm4, %ymm5 -; ALL-NEXT: vpaddw %ymm3, %ymm3, %ymm6 -; ALL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsraw $1, %ymm4, %ymm5 -; ALL-NEXT: vpaddw %ymm6, %ymm6, %ymm7 -; ALL-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsrlw $8, %ymm4, %ymm4 -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm2 = 
ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23] -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] -; ALL-NEXT: vpsraw $4, %ymm0, %ymm5 -; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsraw $2, %ymm0, %ymm5 -; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm8 -; ALL-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsraw $1, %ymm0, %ymm5 -; ALL-NEXT: vpaddw %ymm8, %ymm8, %ymm9 -; ALL-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $8, %ymm0, %ymm0 -; ALL-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] -; ALL-NEXT: vpsraw $4, %ymm4, %ymm5 -; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 -; ALL-NEXT: vpsraw $2, %ymm3, %ymm4 -; ALL-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3 -; ALL-NEXT: vpsraw $1, %ymm3, %ymm4 -; ALL-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3 -; ALL-NEXT: vpsrlw $8, %ymm3, %ymm3 -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] -; ALL-NEXT: vpsraw $4, %ymm1, %ymm4 -; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 -; ALL-NEXT: vpsraw $2, %ymm1, %ymm2 -; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpsraw $1, %ymm1, %ymm2 -; ALL-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpsrlw $8, %ymm1, %ymm1 -; ALL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: splatvar_shift_v64i8: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2 +; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31] +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] +; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm6 +; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm6, %ymm6, %ymm7 +; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpsrlw $8, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23] +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = 
ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] +; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm8 +; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm8, %ymm8, %ymm9 +; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] +; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] +; AVX512DQ-NEXT: vpsraw $4, %ymm1, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsraw $2, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsraw $1, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer %shift = ashr <64 x i8> %a, %splat ret <64 x i8> %shift @@ -221,77 +233,82 @@ } define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind { -; ALL-LABEL: constant_shift_v32i16: -; ALL: ## BB#0: -; ALL-NEXT: vpxor %ymm2, %ymm2, %ymm2 -; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15] -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] -; ALL-NEXT: vpsravd %ymm4, %ymm5, %ymm5 -; ALL-NEXT: vpsrld $16, %ymm5, %ymm5 -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11] -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] -; ALL-NEXT: vpsravd %ymm2, %ymm0, %ymm0 -; ALL-NEXT: vpsrld $16, %ymm0, %ymm0 -; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] -; ALL-NEXT: vpsravd %ymm4, %ymm3, %ymm3 -; ALL-NEXT: vpsrld $16, %ymm3, %ymm3 -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] -; ALL-NEXT: vpsravd %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpsrld $16, %ymm1, %ymm1 -; ALL-NEXT: vpackusdw %ymm3, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: constant_shift_v32i16: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2 +; 
AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15] +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] +; AVX512DQ-NEXT: vpsravd %ymm4, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11] +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] +; AVX512DQ-NEXT: vpsravd %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] +; AVX512DQ-NEXT: vpsravd %ymm4, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsrld $16, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] +; AVX512DQ-NEXT: vpsravd %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackusdw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: constant_shift_v32i16: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0 +; AVX512BW-NEXT: retq %shift = ashr <32 x i16> %a, ret <32 x i16> %shift } define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind { -; ALL-LABEL: constant_shift_v64i8: -; ALL: ## BB#0: -; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0] -; ALL-NEXT: vpsllw $5, %ymm2, %ymm2 -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31] -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] -; ALL-NEXT: vpsraw $4, %ymm4, %ymm5 -; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsraw $2, %ymm4, %ymm5 -; ALL-NEXT: vpaddw %ymm3, %ymm3, %ymm6 -; ALL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsraw $1, %ymm4, %ymm5 -; ALL-NEXT: vpaddw %ymm6, %ymm6, %ymm7 -; ALL-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsrlw $8, %ymm4, %ymm4 -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23] -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] -; ALL-NEXT: vpsraw $4, %ymm0, %ymm5 -; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsraw $2, %ymm0, %ymm5 -; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm8 -; ALL-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsraw $1, %ymm0, %ymm5 -; ALL-NEXT: vpaddw %ymm8, %ymm8, %ymm9 -; ALL-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $8, %ymm0, %ymm0 -; ALL-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 -; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = 
ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] -; ALL-NEXT: vpsraw $4, %ymm4, %ymm5 -; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 -; ALL-NEXT: vpsraw $2, %ymm3, %ymm4 -; ALL-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3 -; ALL-NEXT: vpsraw $1, %ymm3, %ymm4 -; ALL-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3 -; ALL-NEXT: vpsrlw $8, %ymm3, %ymm3 -; ALL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] -; ALL-NEXT: vpsraw $4, %ymm1, %ymm4 -; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 -; ALL-NEXT: vpsraw $2, %ymm1, %ymm2 -; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpsraw $1, %ymm1, %ymm2 -; ALL-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpsrlw $8, %ymm1, %ymm1 -; ALL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: constant_shift_v64i8: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0] +; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31] +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] +; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm6 +; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm6, %ymm6, %ymm7 +; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpsrlw $8, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23] +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] +; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm8 +; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm8, %ymm8, %ymm9 +; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = 
ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] +; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] +; AVX512DQ-NEXT: vpsraw $4, %ymm1, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsraw $2, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsraw $1, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq %shift = ashr <64 x i8> %a, ret <64 x i8> %shift } @@ -319,29 +336,43 @@ } define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind { -; ALL-LABEL: splatconstant_shift_v32i16: -; ALL: ## BB#0: -; ALL-NEXT: vpsraw $3, %ymm0, %ymm0 -; ALL-NEXT: vpsraw $3, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: splatconstant_shift_v32i16: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsraw $3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsraw $3, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: splatconstant_shift_v32i16: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm0 +; AVX512BW-NEXT: retq %shift = ashr <32 x i16> %a, ret <32 x i16> %shift } define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind { -; ALL-LABEL: splatconstant_shift_v64i8: -; ALL: ## BB#0: -; ALL-NEXT: vpsrlw $3, %ymm0, %ymm0 -; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31] -; ALL-NEXT: vpand %ymm2, %ymm0, %ymm0 -; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; ALL-NEXT: vpxor %ymm3, %ymm0, %ymm0 -; ALL-NEXT: vpsubb %ymm3, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $3, %ymm1, %ymm1 -; ALL-NEXT: vpand %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpxor %ymm3, %ymm1, %ymm1 -; ALL-NEXT: vpsubb %ymm3, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: splatconstant_shift_v64i8: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] +; AVX512DQ-NEXT: vpxor %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsubb %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsubb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: splatconstant_shift_v64i8: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrlw $3, %zmm0, 
%zmm0 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] +; AVX512BW-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq %shift = ashr <64 x i8> %a, ret <64 x i8> %shift } Index: test/CodeGen/X86/vector-shift-lshr-512.ll =================================================================== --- test/CodeGen/X86/vector-shift-lshr-512.ll +++ test/CodeGen/X86/vector-shift-lshr-512.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; TODO: Add AVX512BW shift support ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW ; ; Variable Shifts @@ -25,63 +25,69 @@ } define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind { -; ALL-LABEL: var_shift_v32i16: -; ALL: ## BB#0: -; ALL-NEXT: vpxor %ymm4, %ymm4, %ymm4 -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15] -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] -; ALL-NEXT: vpsrlvd %ymm5, %ymm6, %ymm5 -; ALL-NEXT: vpsrld $16, %ymm5, %ymm5 -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11] -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] -; ALL-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0 -; ALL-NEXT: vpsrld $16, %ymm0, %ymm0 -; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0 -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15] -; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] -; ALL-NEXT: vpsrlvd %ymm2, %ymm5, %ymm2 -; ALL-NEXT: vpsrld $16, %ymm2, %ymm2 -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11] -; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] -; ALL-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1 -; ALL-NEXT: vpsrld $16, %ymm1, %ymm1 -; ALL-NEXT: vpackusdw %ymm2, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: var_shift_v32i16: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15] +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] +; AVX512DQ-NEXT: vpsrlvd %ymm5, %ymm6, %ymm5 +; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11] +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] +; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: 
vpsrld $16, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15] +; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15] +; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm5, %ymm2 +; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11] +; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11] +; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: var_shift_v32i16: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq %shift = lshr <32 x i16> %a, %b ret <32 x i16> %shift } define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { -; ALL-LABEL: var_shift_v64i8: -; ALL: ## BB#0: -; ALL-NEXT: vpsrlw $4, %ymm0, %ymm4 -; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; ALL-NEXT: vpand %ymm5, %ymm4, %ymm4 -; ALL-NEXT: vpsllw $5, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $2, %ymm0, %ymm4 -; ALL-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; ALL-NEXT: vpand %ymm6, %ymm4, %ymm4 -; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $1, %ymm0, %ymm4 -; ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; ALL-NEXT: vpand %ymm7, %ymm4, %ymm4 -; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $4, %ymm1, %ymm2 -; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2 -; ALL-NEXT: vpsllw $5, %ymm3, %ymm3 -; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpsrlw $2, %ymm1, %ymm2 -; ALL-NEXT: vpand %ymm6, %ymm2, %ymm2 -; ALL-NEXT: vpaddb %ymm3, %ymm3, %ymm3 -; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpsrlw $1, %ymm1, %ymm2 -; ALL-NEXT: vpand %ymm7, %ymm2, %ymm2 -; ALL-NEXT: vpaddb %ymm3, %ymm3, %ymm3 -; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: var_shift_v64i8: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm4 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm4 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX512DQ-NEXT: vpand %ymm6, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm4 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; AVX512DQ-NEXT: 
vpand %ymm7, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpsllw $5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq + %shift = lshr <64 x i8> %a, %b ret <64 x i8> %shift } @@ -113,48 +119,56 @@ } define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind { -; ALL-LABEL: splatvar_shift_v32i16: -; ALL: ## BB#0: -; ALL-NEXT: vmovd %xmm2, %eax -; ALL-NEXT: movzwl %ax, %eax -; ALL-NEXT: vmovd %eax, %xmm2 -; ALL-NEXT: vpsrlw %xmm2, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw %xmm2, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: splatvar_shift_v32i16: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovd %xmm2, %eax +; AVX512DQ-NEXT: movzwl %ax, %eax +; AVX512DQ-NEXT: vmovd %eax, %xmm2 +; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: splatvar_shift_v32i16: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vmovd %xmm1, %eax +; AVX512BW-NEXT: movzwl %ax, %eax +; AVX512BW-NEXT: vmovd %eax, %xmm1 +; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer %shift = lshr <32 x i16> %a, %splat ret <32 x i16> %shift } define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { -; ALL-LABEL: splatvar_shift_v64i8: -; ALL: ## BB#0: -; ALL-NEXT: vpbroadcastb %xmm2, %ymm2 -; ALL-NEXT: vpsrlw $4, %ymm0, %ymm3 -; ALL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; ALL-NEXT: vpand %ymm4, %ymm3, %ymm3 -; ALL-NEXT: vpsllw $5, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $2, %ymm0, %ymm3 -; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; ALL-NEXT: vpand %ymm5, %ymm3, %ymm3 -; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm6 -; ALL-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $1, %ymm0, %ymm3 -; ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; ALL-NEXT: vpand %ymm7, %ymm3, %ymm3 -; ALL-NEXT: vpaddb %ymm6, %ymm6, %ymm8 -; ALL-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0 -; ALL-NEXT: vpsrlw $4, %ymm1, %ymm3 -; ALL-NEXT: vpand %ymm4, %ymm3, %ymm3 -; ALL-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1 -; ALL-NEXT: vpsrlw $2, %ymm1, %ymm2 -; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: vpsrlw $1, %ymm1, %ymm2 -; ALL-NEXT: vpand %ymm7, %ymm2, %ymm2 -; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1 -; ALL-NEXT: retq +; AVX512DQ-LABEL: splatvar_shift_v64i8: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = 
[15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm8
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
 %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
 %shift = lshr <64 x i8> %a, %splat
 ret <64 x i8> %shift
@@ -183,60 +197,65 @@
 }
 
 define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: constant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsrlvd %ymm4, %ymm5, %ymm5
-; ALL-NEXT: vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsrlvd %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsrld $16, %ymm3, %ymm3
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsrlvd %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsrld $16, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
 %shift = lshr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
 ret <32 x i16> %shift
 }
 
 define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: constant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; ALL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; ALL-NEXT: vpsllw $5, %ymm4, %ymm4
-; ALL-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $2, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm4, %ymm4, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $1, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; ALL-NEXT: vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $1, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT: vpsllw $5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm4, %ymm4, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm8
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
 %shift = lshr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
 ret <64 x i8> %shift
 }
@@ -264,24 +283,35 @@
 }
 
 define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsrlw $3, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
 %shift = lshr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
 ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsrlw $3, %ymm0, %ymm0
-; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; ALL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $3, %ymm1, %ymm1
-; ALL-NEXT: vpand %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v64i8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
 %shift = lshr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
 ret <64 x i8> %shift
 }
Index: test/CodeGen/X86/vector-shift-shl-512.ll
===================================================================
--- test/CodeGen/X86/vector-shift-shl-512.ll
+++ test/CodeGen/X86/vector-shift-shl-512.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; TODO: Add AVX512BW shift support
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 ;
 ; Variable Shifts
 ;
@@ -25,60 +25,65 @@
 }
 
 define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: var_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsllvd %ymm5, %ymm6, %ymm5
-; ALL-NEXT: vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsllvd %ymm2, %ymm5, %ymm2
-; ALL-NEXT: vpsrld $16, %ymm2, %ymm2
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsllvd %ymm3, %ymm1, %ymm1
-; ALL-NEXT: vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsllvd %ymm5, %ymm6, %ymm5
+; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsllvd %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsllvd %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
 %shift = shl <32 x i16> %a, %b
 ret <32 x i16> %shift
 }
 
 define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: var_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $4, %ymm0, %ymm4
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT: vpand %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $2, %ymm0, %ymm4
-; ALL-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT: vpand %ymm6, %ymm4, %ymm4
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpaddb %ymm0, %ymm0, %ymm4
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $4, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpsllw $5, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm6, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm4
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
 %shift = shl <64 x i8> %a, %b
 ret <64 x i8> %shift
 }
@@ -110,45 +115,54 @@
 }
 
 define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vmovd %xmm2, %eax
-; ALL-NEXT: movzwl %ax, %eax
-; ALL-NEXT: vmovd %eax, %xmm2
-; ALL-NEXT: vpsllw %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vpsllw %xmm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovd %xmm2, %eax
+; AVX512DQ-NEXT: movzwl %ax, %eax
+; AVX512DQ-NEXT: vmovd %eax, %xmm2
+; AVX512DQ-NEXT: vpsllw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vmovd %xmm1, %eax
+; AVX512BW-NEXT: movzwl %ax, %eax
+; AVX512BW-NEXT: vmovd %eax, %xmm1
+; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
 %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
 %shift = shl <32 x i16> %a, %splat
 ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpbroadcastb %xmm2, %ymm2
-; ALL-NEXT: vpsllw $4, %ymm0, %ymm3
-; ALL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $2, %ymm0, %ymm3
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT: vpand %ymm5, %ymm3, %ymm3
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpaddb %ymm0, %ymm0, %ymm3
-; ALL-NEXT: vpaddb %ymm6, %ymm6, %ymm7
-; ALL-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $4, %ymm1, %ymm3
-; ALL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; ALL-NEXT: vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $4, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+
 %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
 %shift = shl <64 x i8> %a, %splat
 ret <64 x i8> %shift
@@ -177,42 +191,47 @@
 }
 
 define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: constant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
-; ALL-NEXT: vpmullw %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512DQ-NEXT: vpmullw %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
 %shift = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
 ret <32 x i16> %shift
 }
 
 define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: constant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $4, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; ALL-NEXT: vpsllw $5, %ymm4, %ymm4
-; ALL-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $2, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm4, %ymm4, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; ALL-NEXT: vpaddb %ymm6, %ymm6, %ymm7
-; ALL-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $4, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT: vpsllw $5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm4, %ymm4, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
 %shift = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
 ret <64 x i8> %shift
 }
@@ -240,24 +259,35 @@
 }
 
 define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $3, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
 %shift = shl <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
 ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $3, %ymm0, %ymm0
-; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
-; ALL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $3, %ymm1, %ymm1
-; ALL-NEXT: vpand %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v64i8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
 %shift = shl <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
 ret <64 x i8> %shift
 }