Index: lib/Target/X86/X86.td
===================================================================
--- lib/Target/X86/X86.td
+++ lib/Target/X86/X86.td
@@ -327,6 +327,12 @@
     : SubtargetFeature<"prefer-avx256", "PreferAVX256", "true",
                        "Prefer 256-bit AVX instructions">;
 
+// This feature is used in combination with prefer-avx256 to disable 512-bit
+// instructions in the legalizer.
+def FeatureNo512BitVectors
+    : SubtargetFeature<"no-512-bit-vectors", "No512BitVectors", "true",
+                       "No 512-bit vectors present in function">;
+
 //===----------------------------------------------------------------------===//
 // Register File Description
 //===----------------------------------------------------------------------===//
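Note: `no-512-bit-vectors` encodes a promise made by whoever sets the feature; the backend does not verify it. This excerpt only adds the flag and plumbs it through, so how a producer proves the property is outside the patch. Below is a minimal sketch of the kind of scan that could justify setting it, assuming an LLVM-7-era API; the helper names (`typeIs512BitVector`, `functionUses512BitVectors`) and the idea of keying this off the function body are illustrative assumptions, not part of this change:

```cpp
// Illustrative only: decide whether "+no-512-bit-vectors" would be safe to
// add for a function. The invariant the feature encodes is that no value in
// the function has a vector type 512 bits wide or wider.
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

using namespace llvm;

static bool typeIs512BitVector(Type *Ty) {
  return Ty->isVectorTy() && Ty->getPrimitiveSizeInBits() >= 512;
}

static bool functionUses512BitVectors(const Function &F) {
  // Return type and arguments.
  if (typeIs512BitVector(F.getReturnType()))
    return true;
  for (const Argument &A : F.args())
    if (typeIs512BitVector(A.getType()))
      return true;
  // Every instruction result and operand.
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB) {
      if (typeIs512BitVector(I.getType()))
        return true;
      for (const Value *Op : I.operands())
        if (typeIs512BitVector(Op->getType()))
          return true;
    }
  return false;
}
```

Only when this scan comes back false could a producer append `+no-512-bit-vectors` (e.g. via the "target-features" attribute) and let `prefer-avx256` narrow the legalizer to 256-bit types without miscompiling.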
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1137,12 +1137,8 @@
     }
   }
 
-  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
-    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
-    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
-    addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
-    addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
-
+  if (!Subtarget.useSoftFloat() &&
+      (Subtarget.use512BitOps() || Subtarget.hasVLX())) {
     addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
     addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
     addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
@@ -1189,6 +1185,13 @@
     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1,
                      MVT::v16i1, MVT::v32i1, MVT::v64i1 })
       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
+  }
+
+  if (!Subtarget.useSoftFloat() && Subtarget.use512BitOps()) {
+    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
+    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
+    addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
+    addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
 
     for (MVT VT : MVT::fp_vector_valuetypes())
       setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
@@ -1354,7 +1357,7 @@
   }// has  AVX-512
 
   if (!Subtarget.useSoftFloat() &&
-      (Subtarget.hasAVX512() || Subtarget.hasVLX())) {
+      (Subtarget.use512BitOps() || Subtarget.hasVLX())) {
     // These operations are handled on non-VLX by artificially widening in
     // isel patterns.
     // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
@@ -1406,14 +1409,11 @@
     }
   }
 
-  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
-    addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
-    addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
-
+  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI() &&
+      (Subtarget.use512BitOps() || Subtarget.hasVLX())) {
     addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
-    addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
 
-    for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
+    for (auto VT : { MVT::v32i1 }) {
      setOperationAction(ISD::ADD, VT, Custom);
      setOperationAction(ISD::SUB, VT, Custom);
      setOperationAction(ISD::MUL, VT, Custom);
@@ -1429,14 +1429,39 @@
     }
 
     setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
-    setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
-    setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
 
     // Extends from v32i1 masks to 256-bit vectors.
     setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
     setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
     setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
+  }
+
+  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI() &&
+      Subtarget.use512BitOps()) {
+    addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
+    addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
+
+    addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
+
+    for (auto VT : { MVT::v64i1 }) {
+      setOperationAction(ISD::ADD, VT, Custom);
+      setOperationAction(ISD::SUB, VT, Custom);
+      setOperationAction(ISD::MUL, VT, Custom);
+      setOperationAction(ISD::VSELECT, VT, Expand);
+
+      setOperationAction(ISD::TRUNCATE, VT, Custom);
+      setOperationAction(ISD::SETCC, VT, Custom);
+      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+      setOperationAction(ISD::SELECT, VT, Custom);
+      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+    }
+
+    setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
+    setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
+
     // Extends from v64i1 masks to 512-bit vectors.
     setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
     setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
@@ -1503,7 +1528,7 @@
   }
 
   if (!Subtarget.useSoftFloat() && Subtarget.hasBWI() &&
-      (Subtarget.hasAVX512() || Subtarget.hasVLX())) {
+      (Subtarget.use512BitOps() || Subtarget.hasVLX())) {
     for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
       setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
       setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
@@ -22172,7 +22197,7 @@
   // Only i8 vectors should need custom lowering after this.
   assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
-          (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
+          (VT == MVT::v64i8 && Subtarget.use512BitBWOps())) &&
          "Unsupported vector type");
 
   // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
@@ -22198,7 +22223,7 @@
   SDValue Hi = DAG.getIntPtrConstant(NumElems / 2, dl);
 
   if (VT == MVT::v32i8) {
-    if (Subtarget.hasBWI()) {
+    if (Subtarget.hasBWI() && Subtarget.use512BitOps()) {
       SDValue ExA = DAG.getNode(ExAVX, dl, MVT::v32i16, A);
       SDValue ExB = DAG.getNode(ExAVX, dl, MVT::v32i16, B);
       SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v32i16, ExA, ExB);
@@ -22986,10 +23011,13 @@
   // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
   // make the existing SSE solution better.
   if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
-      (Subtarget.hasAVX512() && VT == MVT::v16i16) ||
-      (Subtarget.hasAVX512() && VT == MVT::v16i8) ||
-      (Subtarget.hasBWI() && VT == MVT::v32i8)) {
-    MVT EvtSVT = (VT == MVT::v32i8 ? MVT::i16 : MVT::i32);
+      (Subtarget.use512BitOps() && VT == MVT::v16i16) ||
+      (Subtarget.use512BitOps() && VT == MVT::v16i8) ||
+      (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8) ||
+      (Subtarget.use512BitBWOps() && VT == MVT::v32i8)) {
+    assert((!Subtarget.hasBWI() || VT.getVectorElementType() == MVT::i8) &&
+           "Unexpected vector type");
+    MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
     MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
     unsigned ExtOpc = Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND
                                                  : ISD::ZERO_EXTEND;
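The vXi8 multiply and shift hunks above all rely on the same widen-operate-narrow trick: x86 has no byte-element multiply and no variable byte shifts, so lanes are extended to i16 (when BWI instructions are usable) or i32, the operation runs at the wider width, and the result is truncated back. A scalar model of that strategy, in plain C++ rather than SelectionDAG code (illustrative only; the function names are made up for this sketch):

```cpp
#include <cstdint>
#include <cstdio>

// Scalar model of the vector lowering: widen an i8 lane to i16, do the
// arithmetic where the ISA supports it, then truncate back to i8.
static uint8_t mul_via_i16(uint8_t a, uint8_t b) {
  uint16_t wide = uint16_t(a) * uint16_t(b); // like vpmullw on widened lanes
  return uint8_t(wide);                      // pack/truncate back to bytes
}

static uint8_t shl_via_i16(uint8_t a, unsigned amt) {
  uint16_t wide = uint16_t(a) << amt;        // like vpsllvw on widened lanes
  return uint8_t(wide);                      // vpmovwb-style truncation
}

int main() {
  // 200*3 = 600 truncates to 88; 0x81 << 2 = 0x204 truncates to 0x04.
  std::printf("%u %u\n", mul_via_i16(200, 3), shl_via_i16(0x81, 2));
  return 0;
}
```

With `prefer-avx256` and `no-512-bit-vectors` set, the change above keeps a v32i8 shift from being widened to v32i16 in a zmm register; the AVX256 check lines in the new shift test show the blend-based ymm fallback being used instead.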
Index: lib/Target/X86/X86Subtarget.h
===================================================================
--- lib/Target/X86/X86Subtarget.h
+++ lib/Target/X86/X86Subtarget.h
@@ -351,6 +351,9 @@
   /// Prefer 256-bit AVX instructions over 512-bit instructions.
   bool PreferAVX256;
 
+  /// Indicates there are no 512-bit vectors present in the function.
+  bool No512BitVectors;
+
   /// What processor and OS we're targeting.
   Triple TargetTriple;
 
@@ -566,6 +569,16 @@
 
   bool preferAVX256() const { return PreferAVX256; }
 
+  // If there are no 512-bit vectors present in the function and we prefer
+  // not to use 512-bit registers, report 512-bit operations as unsupported
+  // so the legalizer splits them into 256-bit halves.
+  bool use512BitOps() const {
+    return hasAVX512() && !(PreferAVX256 && No512BitVectors);
+  }
+
+  bool use512BitBWOps() const {
+    return hasBWI() && !(PreferAVX256 && No512BitVectors);
+  }
+
   bool isXRaySupported() const override { return is64Bit(); }
 
   X86ProcFamilyEnum getProcFamily() const { return X86ProcFamily; }
Index: lib/Target/X86/X86Subtarget.cpp
===================================================================
--- lib/Target/X86/X86Subtarget.cpp
+++ lib/Target/X86/X86Subtarget.cpp
@@ -366,6 +366,7 @@
   GatherOverhead = 1024;
   ScatterOverhead = 1024;
   PreferAVX256 = false;
+  No512BitVectors = false;
 }
 
 X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
Index: test/CodeGen/X86/prefer-avx256-basic.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/prefer-avx256-basic.ll
@@ -0,0 +1,850 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+prefer-avx256,+no-512-bit-vectors | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256 --check-prefix=AVX256F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512bw,+avx512vl,+prefer-avx256,+no-512-bit-vectors | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256 --check-prefix=AVX256VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512bw,+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512DQ
+
+define <8 x double> @addpd512(<8 x double> %y, <8 x double> %x) {
+; AVX256-LABEL: addpd512:
+; AVX256: # %bb.0: # %entry
+; AVX256-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; AVX256-NEXT: vaddpd %ymm1, %ymm3, %ymm1
+; AVX256-NEXT: retq
+;
+; AVX512-LABEL: addpd512:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vaddpd %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+entry:
+  %add.i = fadd <8 x double> %x, %y
+  ret <8 x double> %add.i
+}
+
+define <16 x float> @addps512(<16 x float> %y, <16 x float> %x) {
+; AVX256-LABEL: addps512:
+; AVX256: # %bb.0: # %entry
+; AVX256-NEXT: vaddps %ymm0, %ymm2, %ymm0
+; AVX256-NEXT: vaddps %ymm1, %ymm3, %ymm1
+; AVX256-NEXT: retq
+;
+; AVX512-LABEL: addps512:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vaddps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+entry:
+  %add.i = fadd <16 x float> %x, %y
+  ret <16 x float> %add.i
+}
+
+define <8 x double> @subpd512(<8 x double> %y, <8 x double> %x) {
+; AVX256-LABEL: subpd512:
+; AVX256: # %bb.0: # %entry
+; AVX256-NEXT: vsubpd %ymm0, %ymm2, %ymm0
+; AVX256-NEXT: vsubpd %ymm1, %ymm3, %ymm1
+;
AVX256-NEXT: retq +; +; AVX512-LABEL: subpd512: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vsubpd %zmm0, %zmm1, %zmm0 +; AVX512-NEXT: retq +entry: + %sub.i = fsub <8 x double> %x, %y + ret <8 x double> %sub.i +} + +define <16 x float> @subps512(<16 x float> %y, <16 x float> %x) { +; AVX256-LABEL: subps512: +; AVX256: # %bb.0: # %entry +; AVX256-NEXT: vsubps %ymm0, %ymm2, %ymm0 +; AVX256-NEXT: vsubps %ymm1, %ymm3, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: subps512: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vsubps %zmm0, %zmm1, %zmm0 +; AVX512-NEXT: retq +entry: + %sub.i = fsub <16 x float> %x, %y + ret <16 x float> %sub.i +} + +define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) { +; AVX256F-LABEL: imulq512: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpsrlq $32, %ymm2, %ymm4 +; AVX256F-NEXT: vpmuludq %ymm0, %ymm4, %ymm4 +; AVX256F-NEXT: vpsrlq $32, %ymm0, %ymm5 +; AVX256F-NEXT: vpmuludq %ymm5, %ymm2, %ymm5 +; AVX256F-NEXT: vpaddq %ymm4, %ymm5, %ymm4 +; AVX256F-NEXT: vpsllq $32, %ymm4, %ymm4 +; AVX256F-NEXT: vpmuludq %ymm0, %ymm2, %ymm0 +; AVX256F-NEXT: vpaddq %ymm4, %ymm0, %ymm0 +; AVX256F-NEXT: vpsrlq $32, %ymm3, %ymm2 +; AVX256F-NEXT: vpmuludq %ymm1, %ymm2, %ymm2 +; AVX256F-NEXT: vpsrlq $32, %ymm1, %ymm4 +; AVX256F-NEXT: vpmuludq %ymm4, %ymm3, %ymm4 +; AVX256F-NEXT: vpaddq %ymm2, %ymm4, %ymm2 +; AVX256F-NEXT: vpsllq $32, %ymm2, %ymm2 +; AVX256F-NEXT: vpmuludq %ymm1, %ymm3, %ymm1 +; AVX256F-NEXT: vpaddq %ymm2, %ymm1, %ymm1 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: imulq512: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpmullq %ymm0, %ymm2, %ymm0 +; AVX256VL-NEXT: vpmullq %ymm1, %ymm3, %ymm1 +; AVX256VL-NEXT: retq +; +; AVX512VL-LABEL: imulq512: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmullq %zmm0, %zmm1, %zmm0 +; AVX512VL-NEXT: retq +; +; AVX512F-LABEL: imulq512: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpsrlq $32, %zmm1, %zmm2 +; AVX512F-NEXT: vpmuludq %zmm0, %zmm2, %zmm2 +; AVX512F-NEXT: vpsrlq $32, %zmm0, %zmm3 +; AVX512F-NEXT: vpmuludq %zmm3, %zmm1, %zmm3 +; AVX512F-NEXT: vpaddq %zmm2, %zmm3, %zmm2 +; AVX512F-NEXT: vpsllq $32, %zmm2, %zmm2 +; AVX512F-NEXT: vpmuludq %zmm0, %zmm1, %zmm0 +; AVX512F-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512F-NEXT: retq +; +; AVX512DQ-LABEL: imulq512: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0 +; AVX512DQ-NEXT: retq + %z = mul <8 x i64>%x, %y + ret <8 x i64>%z +} + +define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) { +; AVX256F-LABEL: imulq256: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpsrlq $32, %ymm1, %ymm2 +; AVX256F-NEXT: vpmuludq %ymm0, %ymm2, %ymm2 +; AVX256F-NEXT: vpsrlq $32, %ymm0, %ymm3 +; AVX256F-NEXT: vpmuludq %ymm3, %ymm1, %ymm3 +; AVX256F-NEXT: vpaddq %ymm2, %ymm3, %ymm2 +; AVX256F-NEXT: vpsllq $32, %ymm2, %ymm2 +; AVX256F-NEXT: vpmuludq %ymm0, %ymm1, %ymm0 +; AVX256F-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: imulq256: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpmullq %ymm0, %ymm1, %ymm0 +; AVX256VL-NEXT: retq +; +; AVX512VL-LABEL: imulq256: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmullq %ymm0, %ymm1, %ymm0 +; AVX512VL-NEXT: retq +; +; AVX512F-LABEL: imulq256: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpsrlq $32, %ymm1, %ymm2 +; AVX512F-NEXT: vpmuludq %ymm0, %ymm2, %ymm2 +; AVX512F-NEXT: vpsrlq $32, %ymm0, %ymm3 +; AVX512F-NEXT: vpmuludq %ymm3, %ymm1, %ymm3 +; AVX512F-NEXT: vpaddq %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpsllq $32, %ymm2, %ymm2 +; AVX512F-NEXT: vpmuludq %ymm0, %ymm1, %ymm0 +; AVX512F-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512DQ-LABEL: imulq256: 
+; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1 +; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0 +; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0 +; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0 +; AVX512DQ-NEXT: retq + %z = mul <4 x i64>%x, %y + ret <4 x i64>%z +} + +define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) { +; AVX256F-LABEL: imulq128: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpsrlq $32, %xmm1, %xmm2 +; AVX256F-NEXT: vpmuludq %xmm0, %xmm2, %xmm2 +; AVX256F-NEXT: vpsrlq $32, %xmm0, %xmm3 +; AVX256F-NEXT: vpmuludq %xmm3, %xmm1, %xmm3 +; AVX256F-NEXT: vpaddq %xmm2, %xmm3, %xmm2 +; AVX256F-NEXT: vpsllq $32, %xmm2, %xmm2 +; AVX256F-NEXT: vpmuludq %xmm0, %xmm1, %xmm0 +; AVX256F-NEXT: vpaddq %xmm2, %xmm0, %xmm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: imulq128: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpmullq %xmm0, %xmm1, %xmm0 +; AVX256VL-NEXT: retq +; +; AVX512VL-LABEL: imulq128: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmullq %xmm0, %xmm1, %xmm0 +; AVX512VL-NEXT: retq +; +; AVX512F-LABEL: imulq128: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2 +; AVX512F-NEXT: vpmuludq %xmm0, %xmm2, %xmm2 +; AVX512F-NEXT: vpsrlq $32, %xmm0, %xmm3 +; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm3 +; AVX512F-NEXT: vpaddq %xmm2, %xmm3, %xmm2 +; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2 +; AVX512F-NEXT: vpmuludq %xmm0, %xmm1, %xmm0 +; AVX512F-NEXT: vpaddq %xmm2, %xmm0, %xmm0 +; AVX512F-NEXT: retq +; +; AVX512DQ-LABEL: imulq128: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1 +; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0 +; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0 +; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0 +; AVX512DQ-NEXT: vzeroupper +; AVX512DQ-NEXT: retq + %z = mul <2 x i64>%x, %y + ret <2 x i64>%z +} + +define <8 x double> @mulpd512(<8 x double> %y, <8 x double> %x) { +; AVX256-LABEL: mulpd512: +; AVX256: # %bb.0: # %entry +; AVX256-NEXT: vmulpd %ymm0, %ymm2, %ymm0 +; AVX256-NEXT: vmulpd %ymm1, %ymm3, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: mulpd512: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmulpd %zmm0, %zmm1, %zmm0 +; AVX512-NEXT: retq +entry: + %mul.i = fmul <8 x double> %x, %y + ret <8 x double> %mul.i +} + +define <16 x float> @mulps512(<16 x float> %y, <16 x float> %x) { +; AVX256-LABEL: mulps512: +; AVX256: # %bb.0: # %entry +; AVX256-NEXT: vmulps %ymm0, %ymm2, %ymm0 +; AVX256-NEXT: vmulps %ymm1, %ymm3, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: mulps512: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmulps %zmm0, %zmm1, %zmm0 +; AVX512-NEXT: retq +entry: + %mul.i = fmul <16 x float> %x, %y + ret <16 x float> %mul.i +} + +define <8 x double> @divpd512(<8 x double> %y, <8 x double> %x) { +; AVX256-LABEL: divpd512: +; AVX256: # %bb.0: # %entry +; AVX256-NEXT: vdivpd %ymm0, %ymm2, %ymm0 +; AVX256-NEXT: vdivpd %ymm1, %ymm3, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: divpd512: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vdivpd %zmm0, %zmm1, %zmm0 +; AVX512-NEXT: retq +entry: + %div.i = fdiv <8 x double> %x, %y + ret <8 x double> %div.i +} + +define <16 x float> @divps512(<16 x float> %y, <16 x float> %x) { +; AVX256-LABEL: divps512: +; AVX256: # %bb.0: # %entry +; AVX256-NEXT: vdivps %ymm0, %ymm2, %ymm0 +; AVX256-NEXT: vdivps %ymm1, %ymm3, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: divps512: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0 +; AVX512-NEXT: retq +entry: + %div.i = fdiv <16 x float> %x, %y + ret <16 x 
float> %div.i +} + +define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone { +; AVX256-LABEL: vpaddq_test: +; AVX256: # %bb.0: +; AVX256-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpaddq %ymm3, %ymm1, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: vpaddq_test: +; AVX512: # %bb.0: +; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %x = add <8 x i64> %i, %j + ret <8 x i64> %x +} + +define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone { +; AVX256-LABEL: vpaddd_test: +; AVX256: # %bb.0: +; AVX256-NEXT: vpaddd %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpaddd %ymm3, %ymm1, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: vpaddd_test: +; AVX512: # %bb.0: +; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %x = add <16 x i32> %i, %j + ret <16 x i32> %x +} + +define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: vpaddd_mask_test: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX256F-NEXT: vpcmpeqd %ymm6, %ymm5, %ymm5 +; AVX256F-NEXT: vpcmpeqd %ymm7, %ymm7, %ymm7 +; AVX256F-NEXT: vpxor %ymm7, %ymm5, %ymm5 +; AVX256F-NEXT: vpcmpeqd %ymm6, %ymm4, %ymm4 +; AVX256F-NEXT: vpxor %ymm7, %ymm4, %ymm4 +; AVX256F-NEXT: vpaddd %ymm3, %ymm1, %ymm3 +; AVX256F-NEXT: vpaddd %ymm2, %ymm0, %ymm2 +; AVX256F-NEXT: vblendvps %ymm4, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: vblendvps %ymm5, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: vpaddd_mask_test: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX256VL-NEXT: vpcmpneqd %ymm6, %ymm5, %k1 +; AVX256VL-NEXT: vpcmpneqd %ymm6, %ymm4, %k2 +; AVX256VL-NEXT: vpaddd %ymm2, %ymm0, %ymm0 {%k2} +; AVX256VL-NEXT: vpaddd %ymm3, %ymm1, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: vpaddd_mask_test: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX512-NEXT: vpcmpneqd %zmm3, %zmm2, %k1 +; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} +; AVX512-NEXT: retq + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %x = add <16 x i32> %i, %j + %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i + ret <16 x i32> %r +} + +define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: vpaddd_maskz_test: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX256F-NEXT: vpcmpeqd %ymm6, %ymm5, %ymm5 +; AVX256F-NEXT: vpcmpeqd %ymm6, %ymm4, %ymm4 +; AVX256F-NEXT: vpaddd %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vpandn %ymm1, %ymm5, %ymm1 +; AVX256F-NEXT: vpaddd %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: vpandn %ymm0, %ymm4, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: vpaddd_maskz_test: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX256VL-NEXT: vpcmpneqd %ymm6, %ymm5, %k1 +; AVX256VL-NEXT: vpcmpneqd %ymm6, %ymm4, %k2 +; AVX256VL-NEXT: vpaddd %ymm2, %ymm0, %ymm0 {%k2} {z} +; AVX256VL-NEXT: vpaddd %ymm3, %ymm1, %ymm1 {%k1} {z} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: vpaddd_maskz_test: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX512-NEXT: vpcmpneqd %zmm3, %zmm2, %k1 +; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: retq + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %x = add <16 x i32> %i, %j + %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer + ret <16 x i32> %r +} + +define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone { +; AVX256-LABEL: vpsubq_test: +; AVX256: # %bb.0: +; 
AVX256-NEXT: vpsubq %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpsubq %ymm3, %ymm1, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: vpsubq_test: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %x = sub <8 x i64> %i, %j + ret <8 x i64> %x +} + +define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone { +; AVX256-LABEL: vpsubd_test: +; AVX256: # %bb.0: +; AVX256-NEXT: vpsubd %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpsubd %ymm3, %ymm1, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: vpsubd_test: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %x = sub <16 x i32> %i, %j + ret <16 x i32> %x +} + +define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) { +; AVX256-LABEL: vpmulld_test: +; AVX256: # %bb.0: +; AVX256-NEXT: vpmulld %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpmulld %ymm3, %ymm1, %ymm1 +; AVX256-NEXT: retq +; +; AVX512-LABEL: vpmulld_test: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %x = mul <16 x i32> %i, %j + ret <16 x i32> %x +} + +define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) { +; AVX256-LABEL: andqbrst: +; AVX256: # %bb.0: # %entry +; AVX256-NEXT: vbroadcastsd (%rdi), %ymm2 +; AVX256-NEXT: vandps %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vandps %ymm2, %ymm1, %ymm1 +; AVX256-NEXT: retq +; +; AVX512VL-LABEL: andqbrst: +; AVX512VL: # %bb.0: # %entry +; AVX512VL-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0 +; AVX512VL-NEXT: retq +; +; AVX512F-LABEL: andqbrst: +; AVX512F: # %bb.0: # %entry +; AVX512F-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0 +; AVX512F-NEXT: retq +; +; AVX512DQ-LABEL: andqbrst: +; AVX512DQ: # %bb.0: # %entry +; AVX512DQ-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq +entry: + %a = load i64, i64* %ap, align 8 + %b = insertelement <8 x i64> undef, i64 %a, i32 0 + %c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer + %d = and <8 x i64> %p1, %c + ret <8 x i64>%d +} + +define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vaddps: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm9, %ymm9, %ymm9 +; AVX256F-NEXT: vpxor %ymm9, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm6, %ymm6 +; AVX256F-NEXT: vpxor %ymm9, %ymm6, %ymm6 +; AVX256F-NEXT: vaddps %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvps %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vaddps %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvps %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vaddps: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm7, %k1 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm6, %k2 +; AVX256VL-NEXT: vaddps %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vaddps %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: test_mask_vaddps: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512-NEXT: vaddps %zmm2, %zmm1, %zmm0 {%k1} +; AVX512-NEXT: retq + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %x = fadd <16 x float> %i, %j + %r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %dst + ret <16 x float> %r +} + +define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vmulps: +; AVX256F: # %bb.0: 
+; AVX256F-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm9, %ymm9, %ymm9 +; AVX256F-NEXT: vpxor %ymm9, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm6, %ymm6 +; AVX256F-NEXT: vpxor %ymm9, %ymm6, %ymm6 +; AVX256F-NEXT: vmulps %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvps %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vmulps %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvps %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vmulps: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm7, %k1 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm6, %k2 +; AVX256VL-NEXT: vmulps %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vmulps %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: test_mask_vmulps: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512-NEXT: vmulps %zmm2, %zmm1, %zmm0 {%k1} +; AVX512-NEXT: retq + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %x = fmul <16 x float> %i, %j + %r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %dst + ret <16 x float> %r +} + +define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vminps: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm9, %ymm9, %ymm9 +; AVX256F-NEXT: vpxor %ymm9, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm6, %ymm6 +; AVX256F-NEXT: vpxor %ymm9, %ymm6, %ymm6 +; AVX256F-NEXT: vminps %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvps %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vminps %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvps %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vminps: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm7, %k1 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm6, %k2 +; AVX256VL-NEXT: vminps %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vminps %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: test_mask_vminps: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512-NEXT: vminps %zmm2, %zmm1, %zmm0 {%k1} +; AVX512-NEXT: retq + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %cmp_res = fcmp olt <16 x float> %i, %j + %min = select <16 x i1> %cmp_res, <16 x float> %i, <16 x float> %j + %r = select <16 x i1> %mask, <16 x float> %min, <16 x float> %dst + ret <16 x float> %r +} + +define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vminpd: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm7, %xmm7, %xmm7 +; AVX256F-NEXT: vpcmpeqd %ymm7, %ymm6, %ymm6 +; AVX256F-NEXT: vpcmpeqd %ymm7, %ymm7, %ymm7 +; AVX256F-NEXT: vpxor %ymm7, %ymm6, %ymm6 +; AVX256F-NEXT: vextracti128 $1, %ymm6, %xmm7 +; AVX256F-NEXT: vpmovsxdq %xmm7, %ymm7 +; AVX256F-NEXT: vpmovsxdq %xmm6, %ymm6 +; AVX256F-NEXT: vminpd %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvpd %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vminpd %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvpd %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vminpd: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vextracti128 $1, %ymm6, %xmm7 +; AVX256VL-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: 
vpcmpneqd %xmm8, %xmm7, %k1 +; AVX256VL-NEXT: vpcmpneqd %xmm8, %xmm6, %k2 +; AVX256VL-NEXT: vminpd %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vminpd %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512VL-LABEL: test_mask_vminpd: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 +; AVX512VL-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1} +; AVX512VL-NEXT: retq +; +; AVX512F-LABEL: test_mask_vminpd: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3 +; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512F-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1} +; AVX512F-NEXT: retq +; +; AVX512DQ-LABEL: test_mask_vminpd: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3 +; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512DQ-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1} +; AVX512DQ-NEXT: retq + %mask = icmp ne <8 x i32> %mask1, zeroinitializer + %cmp_res = fcmp olt <8 x double> %i, %j + %min = select <8 x i1> %cmp_res, <8 x double> %i, <8 x double> %j + %r = select <8 x i1> %mask, <8 x double> %min, <8 x double> %dst + ret <8 x double> %r +} + +define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vmaxps: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm9, %ymm9, %ymm9 +; AVX256F-NEXT: vpxor %ymm9, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm6, %ymm6 +; AVX256F-NEXT: vpxor %ymm9, %ymm6, %ymm6 +; AVX256F-NEXT: vmaxps %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvps %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vmaxps %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvps %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vmaxps: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm7, %k1 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm6, %k2 +; AVX256VL-NEXT: vmaxps %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vmaxps %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: test_mask_vmaxps: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512-NEXT: vmaxps %zmm2, %zmm1, %zmm0 {%k1} +; AVX512-NEXT: retq + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %cmp_res = fcmp ogt <16 x float> %i, %j + %max = select <16 x i1> %cmp_res, <16 x float> %i, <16 x float> %j + %r = select <16 x i1> %mask, <16 x float> %max, <16 x float> %dst + ret <16 x float> %r +} + +define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vmaxpd: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm7, %xmm7, %xmm7 +; AVX256F-NEXT: vpcmpeqd %ymm7, %ymm6, %ymm6 +; AVX256F-NEXT: vpcmpeqd %ymm7, %ymm7, %ymm7 +; AVX256F-NEXT: vpxor %ymm7, %ymm6, %ymm6 +; AVX256F-NEXT: vextracti128 $1, %ymm6, %xmm7 +; AVX256F-NEXT: vpmovsxdq %xmm7, %ymm7 +; AVX256F-NEXT: vpmovsxdq %xmm6, %ymm6 +; AVX256F-NEXT: vmaxpd %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvpd %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vmaxpd %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvpd %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vmaxpd: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vextracti128 $1, %ymm6, %xmm7 +; AVX256VL-NEXT: 
vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: vpcmpneqd %xmm8, %xmm7, %k1 +; AVX256VL-NEXT: vpcmpneqd %xmm8, %xmm6, %k2 +; AVX256VL-NEXT: vmaxpd %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vmaxpd %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512VL-LABEL: test_mask_vmaxpd: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 +; AVX512VL-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1} +; AVX512VL-NEXT: retq +; +; AVX512F-LABEL: test_mask_vmaxpd: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3 +; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512F-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1} +; AVX512F-NEXT: retq +; +; AVX512DQ-LABEL: test_mask_vmaxpd: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3 +; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512DQ-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1} +; AVX512DQ-NEXT: retq + %mask = icmp ne <8 x i32> %mask1, zeroinitializer + %cmp_res = fcmp ogt <8 x double> %i, %j + %max = select <8 x i1> %cmp_res, <8 x double> %i, <8 x double> %j + %r = select <8 x i1> %mask, <8 x double> %max, <8 x double> %dst + ret <8 x double> %r +} + +define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vsubps: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm9, %ymm9, %ymm9 +; AVX256F-NEXT: vpxor %ymm9, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm6, %ymm6 +; AVX256F-NEXT: vpxor %ymm9, %ymm6, %ymm6 +; AVX256F-NEXT: vsubps %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvps %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vsubps %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvps %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vsubps: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm7, %k1 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm6, %k2 +; AVX256VL-NEXT: vsubps %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vsubps %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: test_mask_vsubps: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1} +; AVX512-NEXT: retq + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %x = fsub <16 x float> %i, %j + %r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %dst + ret <16 x float> %r +} + +define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vdivps: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm9, %ymm9, %ymm9 +; AVX256F-NEXT: vpxor %ymm9, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm8, %ymm6, %ymm6 +; AVX256F-NEXT: vpxor %ymm9, %ymm6, %ymm6 +; AVX256F-NEXT: vdivps %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvps %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vdivps %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvps %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vdivps: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm7, %k1 +; AVX256VL-NEXT: vpcmpneqd %ymm8, %ymm6, %k2 +; 
AVX256VL-NEXT: vdivps %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vdivps %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: test_mask_vdivps: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 +; AVX512-NEXT: vdivps %zmm2, %zmm1, %zmm0 {%k1} +; AVX512-NEXT: retq + %mask = icmp ne <16 x i32> %mask1, zeroinitializer + %x = fdiv <16 x float> %i, %j + %r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %dst + ret <16 x float> %r +} + +define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i64> %mask1) nounwind readnone { +; AVX256F-LABEL: test_mask_vaddpd: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256F-NEXT: vpcmpeqq %ymm8, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqd %ymm9, %ymm9, %ymm9 +; AVX256F-NEXT: vpxor %ymm9, %ymm7, %ymm7 +; AVX256F-NEXT: vpcmpeqq %ymm8, %ymm6, %ymm6 +; AVX256F-NEXT: vpxor %ymm9, %ymm6, %ymm6 +; AVX256F-NEXT: vaddpd %ymm5, %ymm3, %ymm3 +; AVX256F-NEXT: vblendvpd %ymm7, %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vaddpd %ymm4, %ymm2, %ymm2 +; AVX256F-NEXT: vblendvpd %ymm6, %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_mask_vaddpd: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX256VL-NEXT: vpcmpneqq %ymm8, %ymm7, %k1 +; AVX256VL-NEXT: vpcmpneqq %ymm8, %ymm6, %k2 +; AVX256VL-NEXT: vaddpd %ymm4, %ymm2, %ymm0 {%k2} +; AVX256VL-NEXT: vaddpd %ymm5, %ymm3, %ymm1 {%k1} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: test_mask_vaddpd: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vpcmpneqq %zmm4, %zmm3, %k1 +; AVX512-NEXT: vaddpd %zmm2, %zmm1, %zmm0 {%k1} +; AVX512-NEXT: retq + %mask = icmp ne <8 x i64> %mask1, zeroinitializer + %x = fadd <8 x double> %i, %j + %r = select <8 x i1> %mask, <8 x double> %x, <8 x double> %dst + ret <8 x double> %r +} + +define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j, <8 x i64> %mask1) nounwind readnone { +; AVX256F-LABEL: test_maskz_vaddpd: +; AVX256F: # %bb.0: +; AVX256F-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX256F-NEXT: vpcmpeqq %ymm6, %ymm5, %ymm5 +; AVX256F-NEXT: vpcmpeqq %ymm6, %ymm4, %ymm4 +; AVX256F-NEXT: vaddpd %ymm3, %ymm1, %ymm1 +; AVX256F-NEXT: vpandn %ymm1, %ymm5, %ymm1 +; AVX256F-NEXT: vaddpd %ymm2, %ymm0, %ymm0 +; AVX256F-NEXT: vpandn %ymm0, %ymm4, %ymm0 +; AVX256F-NEXT: retq +; +; AVX256VL-LABEL: test_maskz_vaddpd: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX256VL-NEXT: vpcmpneqq %ymm6, %ymm5, %k1 +; AVX256VL-NEXT: vpcmpneqq %ymm6, %ymm4, %k2 +; AVX256VL-NEXT: vaddpd %ymm2, %ymm0, %ymm0 {%k2} {z} +; AVX256VL-NEXT: vaddpd %ymm3, %ymm1, %ymm1 {%k1} {z} +; AVX256VL-NEXT: retq +; +; AVX512-LABEL: test_maskz_vaddpd: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX512-NEXT: vpcmpneqq %zmm3, %zmm2, %k1 +; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: retq + %mask = icmp ne <8 x i64> %mask1, zeroinitializer + %x = fadd <8 x double> %i, %j + %r = select <8 x i1> %mask, <8 x double> %x, <8 x double> zeroinitializer + ret <8 x double> %r +} Index: test/CodeGen/X86/prefer-avx256-shift.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/prefer-avx256-shift.ll @@ -0,0 +1,419 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+prefer-avx256,+no-512-bit-vectors | FileCheck %s --check-prefix=ALL 
--check-prefix=AVX256 --check-prefix=AVX256BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+prefer-avx256,+no-512-bit-vectors | FileCheck %s --check-prefix=ALL --check-prefix=AVX256 --check-prefix=AVX256VL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL + +define <32 x i8> @var_shl_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { +; AVX256-LABEL: var_shl_v32i8: +; AVX256: # %bb.0: +; AVX256-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX256-NEXT: vpsllw $4, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpsllw $2, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX256-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: retq +; +; AVX512BW-LABEL: var_shl_v32i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero +; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_shl_v32i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = shl <32 x i8> %a, %b + ret <32 x i8> %shift +} + +define <16 x i16> @var_shl_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { +; AVX256BW-LABEL: var_shl_v16i16: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_shl_v16i16: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15] +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm4 = 
ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15] +; AVX256VL-NEXT: vpsllvd %ymm3, %ymm4, %ymm3 +; AVX256VL-NEXT: vpsrld $16, %ymm3, %ymm3 +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11] +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11] +; AVX256VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 +; AVX256VL-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX256VL-NEXT: vpackusdw %ymm3, %ymm0, %ymm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_shl_v16i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_shl_v16i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = shl <16 x i16> %a, %b + ret <16 x i16> %shift +} + +define <16 x i8> @var_shl_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { +; AVX256BW-LABEL: var_shl_v16i8: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX256BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: vpmovwb %ymm0, %xmm0 +; AVX256BW-NEXT: vzeroupper +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_shl_v16i8: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpsllw $5, %xmm1, %xmm1 +; AVX256VL-NEXT: vpsllw $4, %xmm0, %xmm2 +; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: vpsllw $2, %xmm0, %xmm2 +; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX256VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: vpaddb %xmm0, %xmm0, %xmm2 +; AVX256VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_shl_v16i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = 
xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX512BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: vpmovwb %ymm0, %xmm0 +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_shl_v16i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero +; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512VL-NEXT: vzeroupper +; AVX512VL-NEXT: retq + %shift = shl <16 x i8> %a, %b + ret <16 x i8> %shift +} + +define <32 x i8> @var_lshr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { +; AVX256-LABEL: var_lshr_v32i8: +; AVX256: # %bb.0: +; AVX256-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX256-NEXT: vpsrlw $4, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpsrlw $2, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpsrlw $1, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: retq +; +; AVX512BW-LABEL: var_lshr_v32i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero +; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_lshr_v32i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsrlw $2, 
%ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = lshr <32 x i8> %a, %b + ret <32 x i8> %shift +} + +define <16 x i16> @var_lshr_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { +; AVX256BW-LABEL: var_lshr_v16i16: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_lshr_v16i16: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15] +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15] +; AVX256VL-NEXT: vpsrlvd %ymm3, %ymm4, %ymm3 +; AVX256VL-NEXT: vpsrld $16, %ymm3, %ymm3 +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11] +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11] +; AVX256VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 +; AVX256VL-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX256VL-NEXT: vpackusdw %ymm3, %ymm0, %ymm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_lshr_v16i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_lshr_v16i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = lshr <16 x i16> %a, %b + ret <16 x i16> %shift +} + +define <16 x i8> @var_lshr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { +; AVX256BW-LABEL: var_lshr_v16i8: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX256BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: vpmovwb %ymm0, %xmm0 +; AVX256BW-NEXT: vzeroupper +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_lshr_v16i8: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpsllw $5, %xmm1, %xmm1 +; AVX256VL-NEXT: vpsrlw $4, %xmm0, %xmm2 +; AVX256VL-NEXT: vpand 
{{.*}}(%rip), %xmm2, %xmm2
+; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX256VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX256VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT: retq
+;
+; AVX512BW-LABEL: var_lshr_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_lshr_v16i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+  %shift = lshr <16 x i8> %a, %b
+  ret <16 x i8> %shift
+}
+
+define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
+; AVX256-LABEL: var_ashr_v32i8:
+; AVX256: # %bb.0:
+; AVX256-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX256-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX256-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX256-NEXT: vpsraw $4, %ymm3, %ymm4
+; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX256-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX256-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX256-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX256-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX256-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX256-NEXT: vpsraw $4, %ymm0, %ymm3
+; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX256-NEXT: vpsraw $2, %ymm0, %ymm3
+; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX256-NEXT: vpsraw $1, %ymm0, %ymm3
+; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX256-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX256-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX256-NEXT: retq
+;
+; AVX512BW-LABEL: var_ashr_v32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_ashr_v32i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3
+; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3
+; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+  %shift = ashr <32 x i8> %a, %b
+  ret <32 x i8> %shift
+}
+
+define <16 x i16> @var_ashr_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
+; AVX256BW-LABEL: var_ashr_v16i16:
+; AVX256BW: # %bb.0:
+; AVX256BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT: retq
+;
+; AVX256VL-LABEL: var_ashr_v16i16:
+; AVX256VL: # %bb.0:
+; AVX256VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; AVX256VL-NEXT: vpsravd %ymm3, %ymm4, %ymm3
+; AVX256VL-NEXT: vpsrld $16, %ymm3, %ymm3
+; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX256VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; AVX256VL-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX256VL-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; AVX256VL-NEXT: retq
+;
+; AVX512BW-LABEL: var_ashr_v16i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_ashr_v16i16:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512VL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT: retq
+  %shift = ashr <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
+; AVX256BW-LABEL: var_ashr_v16i8:
+; AVX256BW: # %bb.0:
+; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX256BW-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX256BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT: vpmovwb %ymm0, %xmm0
+; AVX256BW-NEXT: vzeroupper
+; AVX256BW-NEXT: retq
+;
+; AVX256VL-LABEL: var_ashr_v16i8:
+; AVX256VL: # %bb.0:
+; AVX256VL-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX256VL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX256VL-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX256VL-NEXT: vpsraw $4, %xmm3, %xmm4
+; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX256VL-NEXT: vpsraw $2, %xmm3, %xmm4
+; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX256VL-NEXT: vpsraw $1, %xmm3, %xmm4
+; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX256VL-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX256VL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX256VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX256VL-NEXT: vpsraw $4, %xmm0, %xmm3
+; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX256VL-NEXT: vpsraw $2, %xmm0, %xmm3
+; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX256VL-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX256VL-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX256VL-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT: retq
+;
+; AVX512BW-LABEL: var_ashr_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX512BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_ashr_v16i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512VL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+  %shift = ashr <16 x i8> %a, %b
+  ret <16 x i8> %shift
+}
Index: test/CodeGen/X86/prefer-avx256-wide-mul.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/prefer-avx256-wide-mul.ll
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+prefer-avx256,+no-512-bit-vectors | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512BW
+
+define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
+; AVX256BW-LABEL: test_div7_32i8:
+; AVX256BW: # %bb.0:
+; AVX256BW-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX256BW-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX256BW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX256BW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX256BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX256BW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX256BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3]
+; AVX256BW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX256BW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX256BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX256BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX256BW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT: vpsrlw $2, %ymm0, %ymm0
+; AVX256BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX256BW-NEXT: retq
+;
+; AVX512BW-LABEL: test_div7_32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsrlw $2, %ymm0, %ymm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: retq
+  %res = udiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  ret <32 x i8> %res
+}
+
+define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
+; AVX256BW-LABEL: test_div7_64i8:
+; AVX256BW: # %bb.0:
+; AVX256BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX256BW-NEXT: vmovdqa {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX256BW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
+; AVX256BW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX256BW-NEXT: vpmullw %ymm3, %ymm4, %ymm4
+; AVX256BW-NEXT: vpsrlw $8, %ymm4, %ymm4
+; AVX256BW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm2[2,3]
+; AVX256BW-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX256BW-NEXT: vpackuswb %ymm5, %ymm2, %ymm2
+; AVX256BW-NEXT: vpsubb %ymm2, %ymm0, %ymm0
+; AVX256BW-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX256BW-NEXT: vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX256BW-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX256BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX256BW-NEXT: vpsrlw $2, %ymm0, %ymm0
+; AVX256BW-NEXT: vmovdqa {{.*#+}} ymm2 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX256BW-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX256BW-NEXT: vextracti128 $1, %ymm1, %xmm5
+; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
+; AVX256BW-NEXT: vpmullw %ymm3, %ymm5, %ymm5
+; AVX256BW-NEXT: vpsrlw $8, %ymm5, %ymm5
+; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX256BW-NEXT: vpmullw %ymm3, %ymm6, %ymm3
+; AVX256BW-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX256BW-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm3[2,3],ymm5[2,3]
+; AVX256BW-NEXT: vinserti128 $1, %xmm5, %ymm3, %ymm3
+; AVX256BW-NEXT: vpackuswb %ymm6, %ymm3, %ymm3
+; AVX256BW-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX256BW-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX256BW-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX256BW-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX256BW-NEXT: vpsrlw $2, %ymm1, %ymm1
+; AVX256BW-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX256BW-NEXT: retq
+;
+; AVX512BW-LABEL: test_div7_64i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+  %res = udiv <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  ret <64 x i8> %res
+}