Index: lib/Target/X86/CMakeLists.txt =================================================================== --- lib/Target/X86/CMakeLists.txt +++ lib/Target/X86/CMakeLists.txt @@ -55,6 +55,7 @@ X86TargetObjectFile.cpp X86TargetTransformInfo.cpp X86VZeroUpper.cpp + X86VectorWidthInfer.cpp X86WinAllocaExpander.cpp X86WinEHState.cpp X86CallingConv.cpp Index: lib/Target/X86/X86.h =================================================================== --- lib/Target/X86/X86.h +++ lib/Target/X86/X86.h @@ -108,6 +108,12 @@ void initializeEvexToVexInstPassPass(PassRegistry &); +/// This pass tries to infer a required vector width for a function if the +/// require-vector-width attribute isn't present. +FunctionPass *createX86VectorWidthInferPass(); + +void initializeX86VectorWidthInferPass(PassRegistry &); + } // End llvm namespace #endif Index: lib/Target/X86/X86.td =================================================================== --- lib/Target/X86/X86.td +++ lib/Target/X86/X86.td @@ -329,6 +329,16 @@ : SubtargetFeature<"fast-gather", "HasFastGather", "true", "Indicates if gather is reasonably fast.">; +def FeaturePreferVecWidth256 + : SubtargetFeature<"prefer-vector-width-256", "PreferVecWidth256", "true", + "Prefer 256-bit AVX instructions">; + +// This feature is used in combination with prefer-avx256 to disable 512-bit +// instructions in the legalizer. +def FeatureNo512BitVectors + : SubtargetFeature<"no-512-bit-vectors", "No512BitVectors", "true", + "No 512-bit vectors present in function">; + //===----------------------------------------------------------------------===// // Register File Description //===----------------------------------------------------------------------===// Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -1138,11 +1138,6 @@ } if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) { - addRegisterClass(MVT::v16i32, &X86::VR512RegClass); - addRegisterClass(MVT::v16f32, &X86::VR512RegClass); - addRegisterClass(MVT::v8i64, &X86::VR512RegClass); - addRegisterClass(MVT::v8f64, &X86::VR512RegClass); - addRegisterClass(MVT::v1i1, &X86::VK1RegClass); addRegisterClass(MVT::v8i1, &X86::VK8RegClass); addRegisterClass(MVT::v16i1, &X86::VK16RegClass); @@ -1189,6 +1184,13 @@ for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1, MVT::v32i1, MVT::v64i1 }) setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); + } + + if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) { + addRegisterClass(MVT::v16i32, &X86::VR512RegClass); + addRegisterClass(MVT::v16f32, &X86::VR512RegClass); + addRegisterClass(MVT::v8i64, &X86::VR512RegClass); + addRegisterClass(MVT::v8f64, &X86::VR512RegClass); for (MVT VT : MVT::fp_vector_valuetypes()) setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal); @@ -1353,8 +1355,7 @@ } }// has AVX-512 - if (!Subtarget.useSoftFloat() && - (Subtarget.hasAVX512() || Subtarget.hasVLX())) { + if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) { // These operations are handled on non-VLX by artificially widening in // isel patterns. // TODO: Custom widen in lowering on non-VLX and drop the isel patterns? 
@@ -1407,13 +1408,9 @@ } if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) { - addRegisterClass(MVT::v32i16, &X86::VR512RegClass); - addRegisterClass(MVT::v64i8, &X86::VR512RegClass); - addRegisterClass(MVT::v32i1, &X86::VK32RegClass); - addRegisterClass(MVT::v64i1, &X86::VK64RegClass); - for (auto VT : { MVT::v32i1, MVT::v64i1 }) { + for (auto VT : { MVT::v32i1 }) { setOperationAction(ISD::ADD, VT, Custom); setOperationAction(ISD::SUB, VT, Custom); setOperationAction(ISD::MUL, VT, Custom); @@ -1429,14 +1426,38 @@ } setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom); setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom); // Extends from v32i1 masks to 256-bit vectors. setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom); setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom); setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom); + } + + if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) { + addRegisterClass(MVT::v32i16, &X86::VR512RegClass); + addRegisterClass(MVT::v64i8, &X86::VR512RegClass); + + addRegisterClass(MVT::v64i1, &X86::VK64RegClass); + + for (auto VT : { MVT::v64i1 }) { + setOperationAction(ISD::ADD, VT, Custom); + setOperationAction(ISD::SUB, VT, Custom); + setOperationAction(ISD::MUL, VT, Custom); + setOperationAction(ISD::VSELECT, VT, Expand); + + setOperationAction(ISD::TRUNCATE, VT, Custom); + setOperationAction(ISD::SETCC, VT, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::SELECT, VT, Custom); + setOperationAction(ISD::BUILD_VECTOR, VT, Custom); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); + } + + setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom); + // Extends from v64i1 masks to 512-bit vectors. setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom); setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom); @@ -1502,8 +1523,7 @@ } } - if (!Subtarget.useSoftFloat() && Subtarget.hasBWI() && - (Subtarget.hasAVX512() || Subtarget.hasVLX())) { + if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) { for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) { setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom); setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom); @@ -14224,10 +14244,14 @@ ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64; break; case MVT::v16i1: - ExtVT = MVT::v16i32; + // Take 512-bit type, unless we are forbidden to use 512-bit types. + ExtVT = DAG.getTargetLoweringInfo().isTypeLegal(MVT::v16i32) ? MVT::v16i32 + : MVT::v16i16; break; case MVT::v32i1: - ExtVT = MVT::v32i16; + // Take 512-bit type, unless we are forbidden to use 512-bit types. + ExtVT = DAG.getTargetLoweringInfo().isTypeLegal(MVT::v32i16) ? MVT::v32i16 + : MVT::v32i8; break; case MVT::v64i1: ExtVT = MVT::v64i8; @@ -16261,8 +16285,21 @@ // Extend VT if the scalar type is v8/v16 and BWI is not supported. MVT ExtVT = VT; if (!Subtarget.hasBWI() && - (VT.getVectorElementType().getSizeInBits() <= 16)) + (VT.getVectorElementType().getSizeInBits() <= 16)) { + // If v16i32 isn't legal we'll need to split and concatenate. 
+ if (NumElts == 16 && !Subtarget.useAVX512Regs()) { + SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i1, In, + DAG.getIntPtrConstant(0, DL)); + SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i1, In, + DAG.getIntPtrConstant(8, DL)); + MVT SplitVT = MVT::getVectorVT(VT.getVectorElementType(), 8); + Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, SplitVT, Lo); + Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, SplitVT, Hi); + return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); + } + ExtVT = MVT::getVectorVT(MVT::i32, NumElts); + } // Widen to 512-bits if VLX is not supported. MVT WideVT = ExtVT; @@ -16432,6 +16469,23 @@ assert((InVT.is256BitVector() || InVT.is128BitVector()) && "Unexpected vector type."); unsigned NumElts = InVT.getVectorNumElements(); + if (NumElts == 16 && !Subtarget.useAVX512Regs()) { + assert(Subtarget.hasVLX() && "Can't use 512-bit registers or VLX?"); + // If we can't use 512-bit ops we'll need to split this to use + // MVT::v8i32 and concat the result. + if (InVT == MVT::v16i8) { + // First we need to sign extend up to 256-bits so we can split that. + InVT = MVT::v16i16; + In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In); + } + SDValue Lo = extract128BitVector(In, 0, DAG, DL); + SDValue Hi = extract128BitVector(In, 8, DAG, DL); + // We're split now, just emit two truncates and a concat. The two + // truncates will trigger legalization to come back to this function. + Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo); + Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi); + return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); + } MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts); MVT ExtVT = MVT::getVectorVT(EltVT, NumElts); In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In); @@ -16460,10 +16514,14 @@ // vpmovqb/w/d, vpmovdb/w, vpmovwb if (Subtarget.hasAVX512()) { // word to byte only under BWI - if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) // v16i16 -> v16i8 - return DAG.getNode(X86ISD::VTRUNC, DL, VT, - getExtendInVec(X86ISD::VSEXT, DL, MVT::v16i32, In, DAG)); - return DAG.getNode(X86ISD::VTRUNC, DL, VT, In); + if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) { // v16i16 -> v16i8 + if (Subtarget.useAVX512Regs()) + return DAG.getNode(X86ISD::VTRUNC, DL, VT, + getExtendInVec(X86ISD::VSEXT, DL, MVT::v16i32, In, + DAG)); + } else { + return DAG.getNode(X86ISD::VTRUNC, DL, VT, In); + } } // Truncate with PACKSS if we are truncating a vector with sign-bits that @@ -18401,8 +18459,21 @@ // Extend VT if the scalar type is v8/v16 and BWI is not supported. MVT ExtVT = VT; - if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) + if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) { + // If v16i32 isn't legal we'll need to split and concatenate. + if (NumElts == 16 && !Subtarget.useAVX512Regs()) { + SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In, + DAG.getIntPtrConstant(0, dl)); + SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In, + DAG.getIntPtrConstant(8, dl)); + MVT SplitVT = MVT::getVectorVT(VTElt, 8); + Lo = DAG.getNode(ISD::SIGN_EXTEND, dl, SplitVT, Lo); + Hi = DAG.getNode(ISD::SIGN_EXTEND, dl, SplitVT, Hi); + return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi); + } + ExtVT = MVT::getVectorVT(MVT::i32, NumElts); + } // Widen to 512-bits if VLX is not supported. MVT WideVT = ExtVT; @@ -21679,39 +21750,6 @@ return LowerVectorIntUnary(Op, DAG); } -/// \brief Lower a vector CTLZ using native supported vector CTLZ instruction. 
-// -// i8/i16 vector implemented using dword LZCNT vector instruction -// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal, -// split the vector, perform operation on it's Lo a Hi part and -// concatenate the results. -static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG) { - assert(Op.getOpcode() == ISD::CTLZ); - SDLoc dl(Op); - MVT VT = Op.getSimpleValueType(); - MVT EltVT = VT.getVectorElementType(); - unsigned NumElems = VT.getVectorNumElements(); - - assert((EltVT == MVT::i8 || EltVT == MVT::i16) && - "Unsupported element type"); - - // Split vector, it's Lo and Hi parts will be handled in next iteration. - if (16 < NumElems) - return LowerVectorIntUnary(Op, DAG); - - MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems); - assert((NewVT.is256BitVector() || NewVT.is512BitVector()) && - "Unsupported value type for operation"); - - // Use native supported vector instruction vplzcntd. - Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0)); - SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op); - SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode); - SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT); - - return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta); -} - // Lower CTLZ using a PSHUFB lookup table implementation. static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL, const X86Subtarget &Subtarget, @@ -21796,13 +21834,52 @@ return Res; } +/// \brief Lower a vector CTLZ using native supported vector CTLZ instruction. +// +// i8/i16 vector implemented using dword LZCNT vector instruction +// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal, +// split the vector, perform operation on it's Lo a Hi part and +// concatenate the results. +static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG, + const X86Subtarget &Subtarget) { + assert(Op.getOpcode() == ISD::CTLZ); + SDLoc dl(Op); + MVT VT = Op.getSimpleValueType(); + MVT EltVT = VT.getVectorElementType(); + unsigned NumElems = VT.getVectorNumElements(); + + assert((EltVT == MVT::i8 || EltVT == MVT::i16) && + "Unsupported element type"); + + // Split vector, it's Lo and Hi parts will be handled in next iteration. + if (NumElems > 16 || (NumElems == 16 && !Subtarget.useAVX512Regs())) { + // If the input is v16i8, we can't split it, just fall back to LUT. + if (VT == MVT::v16i8) + return LowerVectorCTLZInRegLUT(Op, dl, Subtarget, DAG); + + return LowerVectorIntUnary(Op, DAG); + } + + MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems); + assert((NewVT.is256BitVector() || NewVT.is512BitVector()) && + "Unsupported value type for operation"); + + // Use native supported vector instruction vplzcntd. + Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0)); + SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op); + SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode); + SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT); + + return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta); +} + static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL, const X86Subtarget &Subtarget, SelectionDAG &DAG) { MVT VT = Op.getSimpleValueType(); if (Subtarget.hasCDI()) - return LowerVectorCTLZ_AVX512CDI(Op, DAG); + return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget); // Decompose 256-bit ops into smaller 128-bit ops. 
if (VT.is256BitVector() && !Subtarget.hasInt256()) @@ -22206,7 +22283,7 @@ SDValue Hi = DAG.getIntPtrConstant(NumElems / 2, dl); if (VT == MVT::v32i8) { - if (Subtarget.hasBWI()) { + if (Subtarget.useBWIRegs()) { SDValue ExA = DAG.getNode(ExAVX, dl, MVT::v32i16, A); SDValue ExB = DAG.getNode(ExAVX, dl, MVT::v32i16, B); SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v32i16, ExA, ExB); @@ -22996,9 +23073,10 @@ // types, but without AVX512 the extra overheads to get from vXi8 to vXi32 // make the existing SSE solution better. if ((Subtarget.hasInt256() && VT == MVT::v8i16) || - (Subtarget.hasAVX512() && VT == MVT::v16i16) || - (Subtarget.hasAVX512() && VT == MVT::v16i8) || - (Subtarget.hasBWI() && VT == MVT::v32i8)) { + (Subtarget.useAVX512Regs() && VT == MVT::v16i16) || + (Subtarget.useAVX512Regs() && VT == MVT::v16i8) || + (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8) || + (Subtarget.useBWIRegs() && VT == MVT::v32i8)) { assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) && "Unexpected vector type"); MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32; @@ -23822,7 +23900,7 @@ unsigned NumElems = VT.getVectorNumElements(); assert((VT.getVectorElementType() == MVT::i8 || VT.getVectorElementType() == MVT::i16) && "Unexpected type"); - if (NumElems <= 16) { + if (NumElems <= 16 && !(NumElems == 16 && !Subtarget.useAVX512Regs())) { MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems); Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0); Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op); Index: lib/Target/X86/X86Subtarget.h =================================================================== --- lib/Target/X86/X86Subtarget.h +++ lib/Target/X86/X86Subtarget.h @@ -352,6 +352,12 @@ /// unsigned MaxInlineSizeThreshold; + /// Prefer 256-bit AVX instructions over 512-bit instructions. + bool PreferVecWidth256; + + /// Indicates there are no 512-bit vectors present in the function. + bool No512BitVectors; + /// What processor and OS we're targeting. Triple TargetTriple; @@ -575,6 +581,19 @@ bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; } bool hasCLWB() const { return HasCLWB; } + bool preferVecWidth256() const { return PreferVecWidth256; } + + // If there are no 512-bit vectors and we prefer not to use 512-bit registers, + // disable them in the legalizer. We also need VLX support so we can do + // masked operations. 
+  bool useAVX512Regs() const {
+    return hasAVX512() && !(hasVLX() && PreferVecWidth256 && No512BitVectors);
+  }
+
+  bool useBWIRegs() const {
+    return hasBWI() && useAVX512Regs();
+  }
+
   bool isXRaySupported() const override { return is64Bit(); }
 
   X86ProcFamilyEnum getProcFamily() const { return X86ProcFamily; }
Index: lib/Target/X86/X86Subtarget.cpp
===================================================================
--- lib/Target/X86/X86Subtarget.cpp
+++ lib/Target/X86/X86Subtarget.cpp
@@ -345,6 +345,8 @@
   X86ProcFamily = Others;
   GatherOverhead = 1024;
   ScatterOverhead = 1024;
+  PreferVecWidth256 = false;
+  No512BitVectors = false;
 }
 
 X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
Index: lib/Target/X86/X86TargetMachine.cpp
===================================================================
--- lib/Target/X86/X86TargetMachine.cpp
+++ lib/Target/X86/X86TargetMachine.cpp
@@ -54,6 +54,11 @@
     cl::desc("Enable the machine combiner pass"),
     cl::init(true), cl::Hidden);
 
+static cl::opt<bool>
+InferRequiredVectorWidth("x86-experimental-infer-vector-width",
+                         cl::desc("Infer the required vector width"),
+                         cl::init(false), cl::Hidden);
+
 namespace llvm {
 
 void initializeWinEHStatePassPass(PassRegistry &);
@@ -80,6 +85,7 @@
   initializeX86CmovConverterPassPass(PR);
   initializeX86ExecutionDepsFixPass(PR);
   initializeX86DomainReassignmentPass(PR);
+  initializeX86VectorWidthInferPass(PR);
 }
 
 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -255,6 +261,32 @@
   if (SoftFloat)
     Key += FS.empty() ? "+soft-float" : ",+soft-float";
 
+  // Translate the vector width function attribute into subtarget features. This
+  // overrides any CPU specific tuning parameter.
+  if (F.hasFnAttribute("prefer-vector-width")) {
+    StringRef Val = F.getFnAttribute("prefer-vector-width").getValueAsString();
+    unsigned Width;
+    if (!Val.getAsInteger(0, Width)) {
+      if (Key.size() > CPU.size())
+        Key += ",";
+      Key += (Width < 512) ? "+prefer-vector-width-256"
+                           : "-prefer-vector-width-256";
+    }
+  }
+
+  // Translate the required vector width function attribute into subtarget features.
+  // This enables the legalizer to disable 512-bit vectors on targets that
+  // prefer to avoid them.
+  if (F.hasFnAttribute("require-vector-width")) {
+    StringRef Val = F.getFnAttribute("require-vector-width").getValueAsString();
+    unsigned Width;
+    if (!Val.getAsInteger(0, Width)) {
+      if (Key.size() > CPU.size())
+        Key += ",";
+      Key += (Width <= 256) ? "+no-512-bit-vectors" : "-no-512-bit-vectors";
+    }
+  }
+
   FS = Key.substr(CPU.size());
 
   auto &I = SubtargetMap[Key];
@@ -395,6 +427,9 @@
 }
 
 bool X86PassConfig::addPreISel() {
+  if (InferRequiredVectorWidth)
+    addPass(createX86VectorWidthInferPass());
+
   // Only add this pass for 32-bit x86 Windows.
   const Triple &TT = TM->getTargetTriple();
   if (TT.isOSWindows() && TT.getArch() == Triple::x86)
Index: lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.cpp
+++ lib/Target/X86/X86TargetTransformInfo.cpp
@@ -131,7 +131,7 @@
 unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
   if (Vector) {
-    if (ST->hasAVX512())
+    if (ST->hasAVX512() && !ST->preferVecWidth256())
       return 512;
     if (ST->hasAVX())
       return 256;
@@ -2522,7 +2522,7 @@
   // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
   // enable gather with a -march.
   return (DataWidth == 32 || DataWidth == 64) &&
-      (ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()));
+         (ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()));
 }
 
 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
Index: lib/Target/X86/X86VectorWidthInfer.cpp
===================================================================
--- /dev/null
+++ lib/Target/X86/X86VectorWidthInfer.cpp
@@ -0,0 +1,126 @@
+//===- X86VectorWidthInfer.cpp - Infer require-vector-width attribute ----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file This pass tries to infer the required vector width for a function
+/// if the require-vector-width attribute isn't present.
+//===----------------------------------------------------------------------===//
+
+#include "X86TargetMachine.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Pass.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-vector-width-fix"
+
+namespace {
+
+class X86VectorWidthInfer : public FunctionPass {
+public:
+  static char ID; // Pass ID
+
+  X86VectorWidthInfer() : FunctionPass(ID) {
+    initializeX86VectorWidthInferPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<TargetPassConfig>();
+  }
+
+  bool runOnFunction(Function &F) override;
+};
+
+} // end anonymous namespace
+
+char X86VectorWidthInfer::ID = 0;
+
+INITIALIZE_PASS_BEGIN(X86VectorWidthInfer, DEBUG_TYPE,
+                      "X86 Vector Width Infer", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(X86VectorWidthInfer, DEBUG_TYPE,
+                    "X86 Vector Width Infer", false, false)
+
+FunctionPass *llvm::createX86VectorWidthInferPass() {
+  return new X86VectorWidthInfer();
+}
+
+bool X86VectorWidthInfer::runOnFunction(Function &F) {
+  TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
+  const X86Subtarget *ST =
+      TPC.getTM<X86TargetMachine>().getSubtargetImpl(F);
+
+  // If the target doesn't support 512-bit vectors or doesn't prefer them,
+  // then there is nothing to do.
+  if (!ST->hasAVX512() || !ST->preferVecWidth256())
+    return false;
+
+  unsigned RequiredWidth = 0;
+
+  // If we already have a function attribute and it says that 512-bit vectors
+  // are required, we are done.
+  // TODO: In the future we should maybe just trust the attribute.
+  if (F.hasFnAttribute("require-vector-width")) {
+    StringRef Val = F.getFnAttribute("require-vector-width").getValueAsString();
+    unsigned Width;
+    if (!Val.getAsInteger(0, Width)) {
+      if (Width > 256)
+        return false;
+      RequiredWidth = Width;
+    }
+  }
+
+  // Check for a vector return type.
+  Type *RetTy = F.getReturnType();
+  if (RetTy->isVectorTy())
+    RequiredWidth = std::max(RequiredWidth,
+                             RetTy->getPrimitiveSizeInBits());
+
+  // Check for any vector arguments.
+  for (const auto &A : F.args()) {
+    Type *ArgTy = A.getType();
+    if (ArgTy->isVectorTy())
+      RequiredWidth = std::max(RequiredWidth,
+                               ArgTy->getPrimitiveSizeInBits());
+  }
+
+  // Otherwise scan for any calls that need wide registers to match the ABI.
+  // We also need this for any target specific intrinsics.
+  for (auto &BB : F) {
+    for (auto &I : BB) {
+      if (auto *CI = dyn_cast<CallInst>(&I)) {
+        // We can handle target independent intrinsics via type legalization so
+        // skip those.
+ if (auto *II = dyn_cast(&I)) { + StringRef Name = II->getCalledFunction()->getName(); + if (!Name.startswith("llvm.x86.")) + continue; + } + // Ok we have a call. Check its types. + Type *RetTy = CI->getType(); + if (RetTy->isVectorTy()) + RequiredWidth = std::max(RequiredWidth, + RetTy->getPrimitiveSizeInBits()); + for (Value *A : CI->arg_operands()) { + Type *ArgTy = A->getType(); + if (ArgTy->isVectorTy()) + RequiredWidth = std::max(RequiredWidth, + ArgTy->getPrimitiveSizeInBits()); + } + } + } + } + + // Remove and replace function's prefer-vector-width attribute. + // TODO this should be more generic, but this will work until we have wider + // vectors. + F.removeFnAttr("require-vector-width"); + F.addFnAttr("require-vector-width", (RequiredWidth > 256) ? "512" : "256"); + + return false; +} Index: test/CodeGen/X86/prefer-avx256-shift.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/prefer-avx256-shift.ll @@ -0,0 +1,419 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+prefer-vector-width-256,+no-512-bit-vectors | FileCheck %s --check-prefix=ALL --check-prefix=AVX256 --check-prefix=AVX256BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+prefer-vector-width-256,+no-512-bit-vectors | FileCheck %s --check-prefix=ALL --check-prefix=AVX256 --check-prefix=AVX256VL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL + +define <32 x i8> @var_shl_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { +; AVX256-LABEL: var_shl_v32i8: +; AVX256: # %bb.0: +; AVX256-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX256-NEXT: vpsllw $4, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpsllw $2, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX256-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: retq +; +; AVX512BW-LABEL: var_shl_v32i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero +; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512BW-NEXT: 
retq +; +; AVX512VL-LABEL: var_shl_v32i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = shl <32 x i8> %a, %b + ret <32 x i8> %shift +} + +define <16 x i16> @var_shl_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { +; AVX256BW-LABEL: var_shl_v16i16: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_shl_v16i16: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15] +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15] +; AVX256VL-NEXT: vpsllvd %ymm3, %ymm4, %ymm3 +; AVX256VL-NEXT: vpsrld $16, %ymm3, %ymm3 +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11] +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11] +; AVX256VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 +; AVX256VL-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX256VL-NEXT: vpackusdw %ymm3, %ymm0, %ymm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_shl_v16i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_shl_v16i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = shl <16 x i16> %a, %b + ret <16 x i16> %shift +} + +define <16 x i8> @var_shl_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { +; AVX256BW-LABEL: var_shl_v16i8: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX256BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: vpmovwb %ymm0, %xmm0 +; 
AVX256BW-NEXT: vzeroupper +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_shl_v16i8: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpsllw $5, %xmm1, %xmm1 +; AVX256VL-NEXT: vpsllw $4, %xmm0, %xmm2 +; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: vpsllw $2, %xmm0, %xmm2 +; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX256VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: vpaddb %xmm0, %xmm0, %xmm2 +; AVX256VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_shl_v16i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX512BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: vpmovwb %ymm0, %xmm0 +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_shl_v16i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero +; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512VL-NEXT: vzeroupper +; AVX512VL-NEXT: retq + %shift = shl <16 x i8> %a, %b + ret <16 x i8> %shift +} + +define <32 x i8> @var_lshr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { +; AVX256-LABEL: var_lshr_v32i8: +; AVX256: # %bb.0: +; AVX256-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX256-NEXT: vpsrlw $4, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpsrlw $2, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: vpsrlw $1, %ymm0, %ymm2 +; AVX256-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX256-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: retq +; +; AVX512BW-LABEL: var_lshr_v32i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = 
ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero +; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_lshr_v32i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm2 +; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = lshr <32 x i8> %a, %b + ret <32 x i8> %shift +} + +define <16 x i16> @var_lshr_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { +; AVX256BW-LABEL: var_lshr_v16i16: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_lshr_v16i16: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15] +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15] +; AVX256VL-NEXT: vpsrlvd %ymm3, %ymm4, %ymm3 +; AVX256VL-NEXT: vpsrld $16, %ymm3, %ymm3 +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11] +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11] +; AVX256VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 +; AVX256VL-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX256VL-NEXT: vpackusdw %ymm3, %ymm0, %ymm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_lshr_v16i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_lshr_v16i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = 
ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = lshr <16 x i16> %a, %b + ret <16 x i16> %shift +} + +define <16 x i8> @var_lshr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { +; AVX256BW-LABEL: var_lshr_v16i8: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX256BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: vpmovwb %ymm0, %xmm0 +; AVX256BW-NEXT: vzeroupper +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_lshr_v16i8: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpsllw $5, %xmm1, %xmm1 +; AVX256VL-NEXT: vpsrlw $4, %xmm0, %xmm2 +; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX256VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX256VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX256VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_lshr_v16i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX512BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: vpmovwb %ymm0, %xmm0 +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_lshr_v16i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero +; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512VL-NEXT: vzeroupper +; 
AVX512VL-NEXT: retq + %shift = lshr <16 x i8> %a, %b + ret <16 x i8> %shift +} + +define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { +; AVX256-LABEL: var_ashr_v32i8: +; AVX256: # %bb.0: +; AVX256-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX256-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] +; AVX256-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] +; AVX256-NEXT: vpsraw $4, %ymm3, %ymm4 +; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX256-NEXT: vpsraw $2, %ymm3, %ymm4 +; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX256-NEXT: vpsraw $1, %ymm3, %ymm4 +; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 +; AVX256-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX256-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] +; AVX256-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] +; AVX256-NEXT: vpsraw $4, %ymm0, %ymm3 +; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX256-NEXT: vpsraw $2, %ymm0, %ymm3 +; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX256-NEXT: vpsraw $1, %ymm0, %ymm3 +; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX256-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX256-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 +; AVX256-NEXT: retq +; +; AVX512BW-LABEL: var_ashr_v32i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero +; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0 +; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_ashr_v32i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1 +; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] +; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] +; AVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4 +; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4 +; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; 
AVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4 +; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 +; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] +; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] +; AVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3 +; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3 +; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = ashr <32 x i8> %a, %b + ret <32 x i8> %shift +} + +define <16 x i16> @var_ashr_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { +; AVX256BW-LABEL: var_ashr_v16i16: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_ashr_v16i16: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15] +; AVX256VL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15] +; AVX256VL-NEXT: vpsravd %ymm3, %ymm4, %ymm3 +; AVX256VL-NEXT: vpsrld $16, %ymm3, %ymm3 +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11] +; AVX256VL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11] +; AVX256VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 +; AVX256VL-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX256VL-NEXT: vpackusdw %ymm3, %ymm0, %ymm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_ashr_v16i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_ashr_v16i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512VL-NEXT: vpmovsxwd %ymm0, %zmm0 +; AVX512VL-NEXT: vpsravd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512VL-NEXT: retq + %shift = ashr <16 x i16> %a, %b + ret <16 x i16> %shift +} + +define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { +; AVX256BW-LABEL: var_ashr_v16i8: +; AVX256BW: # %bb.0: +; AVX256BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX256BW-NEXT: vpmovsxbw %xmm0, %ymm0 +; 
AVX256BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0 +; AVX256BW-NEXT: vpmovwb %ymm0, %xmm0 +; AVX256BW-NEXT: vzeroupper +; AVX256BW-NEXT: retq +; +; AVX256VL-LABEL: var_ashr_v16i8: +; AVX256VL: # %bb.0: +; AVX256VL-NEXT: vpsllw $5, %xmm1, %xmm1 +; AVX256VL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] +; AVX256VL-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX256VL-NEXT: vpsraw $4, %xmm3, %xmm4 +; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX256VL-NEXT: vpsraw $2, %xmm3, %xmm4 +; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX256VL-NEXT: vpsraw $1, %xmm3, %xmm4 +; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 +; AVX256VL-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX256VL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX256VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; AVX256VL-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX256VL-NEXT: vpsraw $2, %xmm0, %xmm3 +; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX256VL-NEXT: vpsraw $1, %xmm0, %xmm3 +; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX256VL-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX256VL-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 +; AVX256VL-NEXT: retq +; +; AVX512BW-LABEL: var_ashr_v16i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0 +; AVX512BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0 +; AVX512BW-NEXT: vpmovwb %ymm0, %xmm0 +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq +; +; AVX512VL-LABEL: var_ashr_v16i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero +; AVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0 +; AVX512VL-NEXT: vpsravd %zmm1, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512VL-NEXT: vzeroupper +; AVX512VL-NEXT: retq + %shift = ashr <16 x i8> %a, %b + ret <16 x i8> %shift +} Index: test/CodeGen/X86/prefer-avx256-wide-mul.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/prefer-avx256-wide-mul.ll @@ -0,0 +1,46 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+prefer-vector-width-256 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,-prefer-vector-width-256 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512BW + +define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind 
"require-vector-width"="256" { +; CHECK-LABEL: test_div7_32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero +; CHECK-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1 +; CHECK-NEXT: vpsrlw $8, %zmm1, %zmm1 +; CHECK-NEXT: vpmovwb %zmm1, %ymm1 +; CHECK-NEXT: vpsubb %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $1, %ymm0, %ymm0 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; CHECK-NEXT: vpaddb %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $2, %ymm0, %ymm0 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; CHECK-NEXT: retq + %res = udiv <32 x i8> %a, + ret <32 x i8> %res +} + +define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind "require-vector-width"="512" { +; CHECK-LABEL: test_div7_64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; CHECK-NEXT: vpmullw %zmm2, %zmm1, %zmm1 +; CHECK-NEXT: vpsrlw $8, %zmm1, %zmm1 +; CHECK-NEXT: vpmovwb %zmm1, %ymm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3 +; CHECK-NEXT: vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero +; CHECK-NEXT: vpmullw %zmm2, %zmm3, %zmm2 +; CHECK-NEXT: vpsrlw $8, %zmm2, %zmm2 +; CHECK-NEXT: vpmovwb %zmm2, %ymm2 +; CHECK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 +; CHECK-NEXT: vpsubb %zmm1, %zmm0, %zmm0 +; CHECK-NEXT: vpsrlw $1, %zmm0, %zmm0 +; CHECK-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; CHECK-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; CHECK-NEXT: vpsrlw $2, %zmm0, %zmm0 +; CHECK-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; CHECK-NEXT: retq + %res = udiv <64 x i8> %a, + ret <64 x i8> %res +} Index: test/Transforms/LoopVectorize/X86/avx512.ll =================================================================== --- test/Transforms/LoopVectorize/X86/avx512.ll +++ test/Transforms/LoopVectorize/X86/avx512.ll @@ -1,4 +1,5 @@ -; RUN: opt -mattr=+avx512f --loop-vectorize -S < %s | llc -mattr=+avx512f | FileCheck %s +; RUN: opt -mattr=+avx512f,-prefer-vector-width-256 --loop-vectorize -S < %s | llc -mattr=+avx512f | FileCheck %s +; RUN: opt -mattr=+avx512vl,+prefer-vector-width-256 --loop-vectorize -S < %s | llc -mattr=+avx512f | FileCheck %s --check-prefix=CHECK-PREFER-AVX256 target datalayout = 
"e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.9.0" @@ -10,6 +11,12 @@ ; CHECK: vmovdqu32 %zmm{{.}}, ; CHECK-NOT: %ymm +; Verify that we don't generate 512-bit wide vectors when prefer-avx256 width says not to + +; CHECK-PREFER-AVX256-LABEL: f: +; CHECK-PREFER-AVX256: vmovdqu %ymm{{.}}, +; CHECK-PREFER-AVX256-NOT: %zmm + define void @f(i32* %a, i32 %n) { entry: %cmp4 = icmp sgt i32 %n, 0 @@ -33,3 +40,38 @@ for.end: ; preds = %for.end.loopexit, %entry ret void } + +; Verify that we "prefer-vector-width=256" attribute prevents the use of 512-bit +; vectors + +; CHECK-LABEL: g: +; CHECK: vmovdqu %ymm{{.}}, +; CHECK-NOT: %zmm + +; CHECK-PREFER-AVX256-LABEL: g: +; CHECK-PREFER-AVX256: vmovdqu %ymm{{.}}, +; CHECK-PREFER-AVX256-NOT: %zmm + +define void @g(i32* %a, i32 %n) "prefer-vector-width"="256" { +entry: + %cmp4 = icmp sgt i32 %n, 0 + br i1 %cmp4, label %for.body.preheader, label %for.end + +for.body.preheader: ; preds = %entry + br label %for.body + +for.body: ; preds = %for.body.preheader, %for.body + %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ] + %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv + store i32 %n, i32* %arrayidx, align 4 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %lftr.wideiv = trunc i64 %indvars.iv.next to i32 + %exitcond = icmp eq i32 %lftr.wideiv, %n + br i1 %exitcond, label %for.end.loopexit, label %for.body + +for.end.loopexit: ; preds = %for.body + br label %for.end + +for.end: ; preds = %for.end.loopexit, %entry + ret void +}