diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -398,6 +398,24 @@
 bool widenShuffleMaskElts(int Scale, ArrayRef<int> Mask, SmallVectorImpl<int> &ScaledMask);
 
+/// Splits and processes a shuffle mask depending on the number of input and
+/// output registers. The function does 2 main things: 1) splits the
+/// source/destination vectors into real registers; 2) does the mask analysis
+/// to identify which real registers are permuted. Then the function processes
+/// the resulting register masks using the provided action items. If no input
+/// register is defined, \p NoInputAction is used. If only 1 input register is
+/// used, \p SingleInputAction is used; otherwise \p ManyInputsAction is used
+/// to process 2 or more input registers and their masks.
+/// \param Mask Original shuffle mask.
+/// \param NumOfSrcRegs Number of source registers.
+/// \param NumOfDestRegs Number of destination registers.
+/// \param NumOfUsedRegs Number of actually used destination registers.
+void processShuffleMasks(
+    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
+    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
+    function_ref<void(ArrayRef<int>, unsigned)> SingleInputAction,
+    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction);
+
 /// Compute a map of integer instructions to their minimum legal type
 /// size.
 ///
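For illustration, a minimal sketch of how a caller might drive this interface; the example mask, the register counts, and the lambda bodies below are assumptions for the sketch and are not part of the patch:

    #include "llvm/Analysis/VectorUtils.h"
    using namespace llvm;

    // Illustrative only: an <8 x i32> shuffle viewed as 2 source and 2
    // destination registers of 4 elements each. Destination register 0 reads
    // only source register 1; destination register 1 mixes both sources.
    static void exampleProcessShuffleMasks() {
      int ExampleMask[] = {4, 5, 6, 7, 0, 4, 1, 5};
      processShuffleMasks(
          ExampleMask, /*NumOfSrcRegs=*/2, /*NumOfDestRegs=*/2,
          /*NumOfUsedRegs=*/2,
          []() {
            // This destination register uses no input at all (e.g. emit undef).
          },
          [](ArrayRef<int> RegMask, unsigned SrcReg) {
            // Single-input permute: here SrcReg == 1 and RegMask == {0, 1, 2, 3}.
          },
          [](ArrayRef<int> RegMask, unsigned SrcReg1, unsigned SrcReg2) {
            // Two-input shuffle: here RegMask == {0, 4, 1, 5}, indexing the
            // concatenation of source registers 0 and 1.
          });
    }

SplitVecRes_VECTOR_SHUFFLE below drives the same three callbacks to emit an undef, a single-source shuffle, or a two-source ISD::VECTOR_SHUFFLE per output register instead of falling back to a BUILD_VECTOR.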
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -496,6 +496,116 @@
   return true;
 }
 
+void llvm::processShuffleMasks(
+    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
+    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
+    function_ref<void(ArrayRef<int>, unsigned)> SingleInputAction,
+    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
+  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
+  // Try to perform better estimation of the permutation.
+  // 1. Split the source/destination vectors into real registers.
+  // 2. Do the mask analysis to identify which real registers are
+  // permuted.
+  int Sz = Mask.size();
+  unsigned SzDest = Sz / NumOfDestRegs;
+  unsigned SzSrc = Sz / NumOfSrcRegs;
+  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
+    auto &RegMasks = Res[I];
+    RegMasks.assign(NumOfSrcRegs, {});
+    // Check which source register each value in this dest register comes
+    // from.
+    for (unsigned K = 0; K < SzDest; ++K) {
+      int Idx = I * SzDest + K;
+      if (Idx == Sz)
+        break;
+      if (Mask[Idx] >= Sz || Mask[Idx] == UndefMaskElem)
+        continue;
+      int SrcRegIdx = Mask[Idx] / SzSrc;
+      // Lazily initialize the per-source mask on its first use.
+      if (RegMasks[SrcRegIdx].empty())
+        RegMasks[SrcRegIdx].assign(SzDest, UndefMaskElem);
+      RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
+    }
+  }
+  // Process split mask.
+  for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
+    auto &Dest = Res[I];
+    int NumSrcRegs =
+        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
+    switch (NumSrcRegs) {
+    case 0:
+      // No input vectors were used!
+      NoInputAction();
+      break;
+    case 1: {
+      // Find the only non-empty source mask.
+      auto *It =
+          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
+      unsigned SrcReg = std::distance(Dest.begin(), It);
+      SingleInputAction(*It, SrcReg);
+      break;
+    }
+    default: {
+      // The first mask is a permutation of a single register. Since we have
+      // 2 or more input registers to shuffle, merge the masks for the first 2
+      // registers and generate a shuffle of 2 registers rather than reordering
+      // the first register and then shuffling with the second register. Next,
+      // generate the shuffles of the resulting register + the remaining
+      // registers from the list.
+      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
+                               ArrayRef<int> SecondMask) {
+        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
+          if (SecondMask[Idx] != UndefMaskElem) {
+            assert(FirstMask[Idx] == UndefMaskElem &&
+                   "Expected undefined mask element.");
+            FirstMask[Idx] = SecondMask[Idx] + VF;
+          }
+        }
+      };
+      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
+        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
+          if (Mask[Idx] != UndefMaskElem)
+            Mask[Idx] = Idx;
+        }
+      };
+      int SecondIdx;
+      do {
+        int FirstIdx = -1;
+        SecondIdx = -1;
+        MutableArrayRef<int> FirstMask, SecondMask;
+        for (unsigned I = 0; I < NumOfDestRegs; ++I) {
+          SmallVectorImpl<int> &RegMask = Dest[I];
+          if (RegMask.empty())
+            continue;
+
+          if (FirstIdx == SecondIdx) {
+            FirstIdx = I;
+            FirstMask = RegMask;
+            continue;
+          }
+          SecondIdx = I;
+          SecondMask = RegMask;
+          CombineMasks(FirstMask, SecondMask);
+          RegMask.clear();
+          SecondMask = FirstMask;
+          ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
+          NormalizeMask(FirstMask);
+          FirstIdx = SecondIdx;
+        }
+        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
+          CombineMasks(SecondMask, FirstMask);
+          ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
+          Dest[FirstIdx].clear();
+          NormalizeMask(SecondMask);
+        }
+      } while (SecondIdx >= 0);
+      break;
+    }
+    }
+  }
+}
+
 MapVector<Instruction *, uint64_t>
 llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                                const TargetTransformInfo *TTI) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -20,7 +20,9 @@
 //===----------------------------------------------------------------------===//
 
 #include "LegalizeTypes.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/VectorUtils.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/TypeSize.h"
@@ -2168,7 +2170,7 @@
                                                   SDValue &Lo, SDValue &Hi) {
   // The low and high parts of the original input give four input vectors.
   SDValue Inputs[4];
-  SDLoc dl(N);
+  SDLoc DL(N);
   GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
   GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
   EVT NewVT = Inputs[0].getValueType();
@@ -2177,99 +2179,33 @@
   // If Lo or Hi uses elements from at most two of the four input vectors, then
   // express it as a vector shuffle of those two inputs. Otherwise extract the
   // input elements by hand and construct the Lo/Hi output using a BUILD_VECTOR.
-  SmallVector<int, 16> Ops;
+  SDValue TmpInputs[4];
+  copy(Inputs, std::begin(TmpInputs));
   for (unsigned High = 0; High < 2; ++High) {
     SDValue &Output = High ? Hi : Lo;
 
     // Build a shuffle mask for the output, discovering on the fly which
-    // input vectors to use as shuffle operands (recorded in InputUsed).
-    // If building a suitable shuffle vector proves too hard, then bail
-    // out with useBuildVector set.
-    unsigned InputUsed[2] = { -1U, -1U }; // Not yet discovered.
+    // input vectors to use as shuffle operands.
     unsigned FirstMaskIdx = High * NewElts;
-    bool useBuildVector = false;
-    for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
-      // The mask element. This indexes into the input.
-      int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
-
-      // The input vector this mask element indexes into.
-      unsigned Input = (unsigned)Idx / NewElts;
-
-      if (Input >= array_lengthof(Inputs)) {
-        // The mask element does not index into any input vector.
-        Ops.push_back(-1);
-        continue;
-      }
-
-      // Turn the index into an offset from the start of the input vector.
-      Idx -= Input * NewElts;
-
-      // Find or create a shuffle vector operand to hold this input.
-      unsigned OpNo;
-      for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
-        if (InputUsed[OpNo] == Input) {
-          // This input vector is already an operand.
-          break;
-        } else if (InputUsed[OpNo] == -1U) {
-          // Create a new operand for this input vector.
-          InputUsed[OpNo] = Input;
-          break;
-        }
-      }
-
-      if (OpNo >= array_lengthof(InputUsed)) {
-        // More than two input vectors used! Give up on trying to create a
-        // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
-        useBuildVector = true;
-        break;
-      }
-
-      // Add the mask index for the new shuffle vector.
-      Ops.push_back(Idx + OpNo * NewElts);
-    }
-
-    if (useBuildVector) {
-      EVT EltVT = NewVT.getVectorElementType();
-      SmallVector<SDValue, 16> SVOps;
-
-      // Extract the input elements by hand.
-      for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
-        // The mask element. This indexes into the input.
-        int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
-
-        // The input vector this mask element indexes into.
-        unsigned Input = (unsigned)Idx / NewElts;
-
-        if (Input >= array_lengthof(Inputs)) {
-          // The mask element is "undef" or indexes off the end of the input.
-          SVOps.push_back(DAG.getUNDEF(EltVT));
-          continue;
-        }
-
-        // Turn the index into an offset from the start of the input vector.
-        Idx -= Input * NewElts;
-
-        // Extract the vector element by hand.
-        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
-                                    Inputs[Input],
-                                    DAG.getVectorIdxConstant(Idx, dl)));
-      }
-
-      // Construct the Lo/Hi output using a BUILD_VECTOR.
-      Output = DAG.getBuildVector(NewVT, dl, SVOps);
-    } else if (InputUsed[0] == -1U) {
-      // No input vectors were used! The result is undefined.
-      Output = DAG.getUNDEF(NewVT);
-    } else {
-      SDValue Op0 = Inputs[InputUsed[0]];
-      // If only one input was used, use an undefined vector for the other.
-      SDValue Op1 = InputUsed[1] == -1U ?
-        DAG.getUNDEF(NewVT) : Inputs[InputUsed[1]];
-      // At least one input vector was used. Create a new shuffle vector.
-      Output = DAG.getVectorShuffle(NewVT, dl, Op0, Op1, Ops);
-    }
-
-    Ops.clear();
+    SmallVector<int> Mask(NewElts * array_lengthof(Inputs), UndefMaskElem);
+    copy(N->getMask().slice(FirstMaskIdx, NewElts), Mask.begin());
+    assert(!Output && "Expected default initialized initial value.");
+    processShuffleMasks(
+        Mask, array_lengthof(Inputs), array_lengthof(Inputs),
+        /*NumOfUsedRegs=*/1,
+        [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
+        [&Output, &DAG = DAG, NewVT, &DL, &Inputs](ArrayRef<int> Mask,
+                                                   unsigned Idx) {
+          Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx],
+                                        DAG.getUNDEF(NewVT), Mask);
+        },
+        [&Output, &DAG = DAG, NewVT, &DL,
+         &Inputs](ArrayRef<int> Mask, unsigned Idx1, unsigned Idx2) {
+          Output =
+              DAG.getVectorShuffle(NewVT, DL, Inputs[Idx1], Inputs[Idx2], Mask);
+          Inputs[Idx1] = Output;
+        });
+    std::swap(Inputs, TmpInputs);
   }
 }
diff --git a/llvm/test/CodeGen/AArch64/insert-extend.ll b/llvm/test/CodeGen/AArch64/insert-extend.ll
--- a/llvm/test/CodeGen/AArch64/insert-extend.ll
+++ b/llvm/test/CodeGen/AArch64/insert-extend.ll
@@ -89,6 +89,7 @@
 ; CHECK-NEXT: uzp1 v18.8b, v18.8b, v19.8b
 ; CHECK-NEXT: ushll v1.8h, v1.8b, #0
 ; CHECK-NEXT: ushll v3.8h, v3.8b, #0
+; CHECK-NEXT: ushll v7.8h, v7.8b, #0
 ; CHECK-NEXT: ushll v17.8h, v17.8b, #0
 ; CHECK-NEXT: ushll v20.8h, v20.8b, #0
 ; CHECK-NEXT: ushll v6.8h, v16.8b, #0
@@ -97,7 +98,6 @@
 ; CHECK-NEXT: ushll v2.8h, v2.8b, #0
 ; CHECK-NEXT: ushll v19.8h, v21.8b, #0
 ; CHECK-NEXT: ushll v5.8h, v5.8b, #0
-; CHECK-NEXT: ushll v7.8h, v7.8b, #0
 ; CHECK-NEXT: usubl v18.4s, v6.4h, v16.4h
 ; CHECK-NEXT: usubl2 v6.4s, v6.8h, v16.8h
 ; CHECK-NEXT: usubl v16.4s, v4.4h, v2.4h
@@ -107,8 +107,8 @@
 ; CHECK-NEXT: uzp1 v3.8b, v5.8b, v20.8b
 ; CHECK-NEXT: uzp1 v0.8b, v7.8b, v0.8b
 ; CHECK-NEXT: ushll v4.8h, v4.8b, #0
-; CHECK-NEXT: ushll v3.8h, v3.8b, #0
 ; CHECK-NEXT: ushll v1.8h, v1.8b, #0
+; CHECK-NEXT: ushll v3.8h, v3.8b, #0
 ; CHECK-NEXT: ushll v0.8h, v0.8b, #0
 ; CHECK-NEXT: usubl2 v5.4s, v4.8h, v3.8h
 ; CHECK-NEXT: usubl v3.4s, v4.4h, v3.4h
@@ -117,114 +117,116 @@
 ; CHECK-NEXT: shl v1.4s, v3.4s, #16
 ; CHECK-NEXT: shl v3.4s, v5.4s, #16
 ; CHECK-NEXT: shl v4.4s, v4.4s, #16
-; CHECK-NEXT: add v1.4s, v1.4s, v18.4s
 ; CHECK-NEXT: shl v0.4s, v0.4s, #16
 ; CHECK-NEXT: add v3.4s, v3.4s, v6.4s
+; CHECK-NEXT: add v1.4s, v1.4s, v18.4s
 ; CHECK-NEXT: add v2.4s, v4.4s, v2.4s
+; CHECK-NEXT: add v0.4s, v0.4s, v16.4s
 ; CHECK-NEXT: rev64 v4.4s, v3.4s
 ; CHECK-NEXT: rev64 v5.4s, v1.4s
-; CHECK-NEXT: add v0.4s, v0.4s, v16.4s
 ; CHECK-NEXT: rev64 v6.4s, v2.4s
 ; CHECK-NEXT: rev64 v7.4s, v0.4s
 ; CHECK-NEXT: add v16.4s, v3.4s, v4.4s
 ; CHECK-NEXT: add v17.4s, v1.4s, v5.4s
-; CHECK-NEXT: sub v1.4s, v1.4s, v5.4s
-; CHECK-NEXT: trn2 v5.4s, v16.4s, v17.4s
 ; CHECK-NEXT: add v18.4s, v2.4s, v6.4s
 ; CHECK-NEXT: add v19.4s, v0.4s, v7.4s
 ; CHECK-NEXT: sub v2.4s, v2.4s, v6.4s
 ; CHECK-NEXT: sub v0.4s, v0.4s, v7.4s
 ; CHECK-NEXT: sub v3.4s, v3.4s, v4.4s
-; CHECK-NEXT: trn2 v4.4s, v19.4s, v18.4s
-; CHECK-NEXT: ext v6.16b, v5.16b, v16.16b, #8
-; CHECK-NEXT: zip1 v7.4s, v0.4s, v2.4s
-; CHECK-NEXT: trn2 v16.4s, v17.4s, v16.4s
-; CHECK-NEXT: ext v4.16b, v19.16b, v4.16b, #8
-; CHECK-NEXT: zip1 v20.4s, v3.4s, v1.4s
-; CHECK-NEXT: ext v7.16b, v0.16b, v7.16b, #8
-; CHECK-NEXT: ext v17.16b, v16.16b, v17.16b, #8
+; CHECK-NEXT: trn2 v4.4s, v17.4s, v16.4s
+; CHECK-NEXT: zip1 v21.4s, v0.4s, v2.4s
+; CHECK-NEXT: sub v1.4s, v1.4s, v5.4s
+; CHECK-NEXT: mov v6.16b, v17.16b
+; CHECK-NEXT: ext v5.16b, v4.16b, v17.16b, #8
+; CHECK-NEXT: trn2 v7.4s, v19.4s, v18.4s
+; CHECK-NEXT: zip2 v20.4s, v3.4s, v1.4s
+; CHECK-NEXT:
mov v3.s[1], v1.s[0] +; CHECK-NEXT: ext v1.16b, v0.16b, v21.16b, #8 +; CHECK-NEXT: ext v17.16b, v17.16b, v17.16b, #12 +; CHECK-NEXT: mov v6.s[0], v16.s[1] +; CHECK-NEXT: ext v7.16b, v19.16b, v7.16b, #8 ; CHECK-NEXT: zip2 v18.4s, v19.4s, v18.4s -; CHECK-NEXT: zip2 v1.4s, v3.4s, v1.4s +; CHECK-NEXT: mov v3.d[1], v1.d[1] ; CHECK-NEXT: mov v0.s[3], v2.s[2] -; CHECK-NEXT: mov v5.d[1], v4.d[1] -; CHECK-NEXT: mov v20.d[1], v7.d[1] -; CHECK-NEXT: mov v17.d[1], v18.d[1] -; CHECK-NEXT: mov v16.d[1], v4.d[1] -; CHECK-NEXT: mov v1.d[1], v0.d[1] -; CHECK-NEXT: mov v6.d[1], v18.d[1] -; CHECK-NEXT: add v0.4s, v17.4s, v16.4s -; CHECK-NEXT: add v2.4s, v1.4s, v20.4s -; CHECK-NEXT: sub v3.4s, v5.4s, v6.4s -; CHECK-NEXT: sub v1.4s, v20.4s, v1.4s -; CHECK-NEXT: rev64 v4.4s, v0.4s +; CHECK-NEXT: ext v1.16b, v16.16b, v17.16b, #12 +; CHECK-NEXT: mov v6.d[1], v7.d[1] +; CHECK-NEXT: mov v5.d[1], v18.d[1] +; CHECK-NEXT: mov v20.d[1], v0.d[1] +; CHECK-NEXT: mov v1.d[1], v18.d[1] +; CHECK-NEXT: mov v4.d[1], v7.d[1] +; CHECK-NEXT: add v2.4s, v20.4s, v3.4s +; CHECK-NEXT: sub v1.4s, v6.4s, v1.4s +; CHECK-NEXT: sub v3.4s, v3.4s, v20.4s +; CHECK-NEXT: add v0.4s, v5.4s, v4.4s +; CHECK-NEXT: rev64 v4.4s, v1.4s ; CHECK-NEXT: rev64 v5.4s, v3.4s -; CHECK-NEXT: rev64 v6.4s, v1.4s -; CHECK-NEXT: rev64 v7.4s, v2.4s -; CHECK-NEXT: add v16.4s, v0.4s, v4.4s +; CHECK-NEXT: rev64 v6.4s, v2.4s +; CHECK-NEXT: rev64 v7.4s, v0.4s +; CHECK-NEXT: add v16.4s, v1.4s, v4.4s ; CHECK-NEXT: add v17.4s, v3.4s, v5.4s -; CHECK-NEXT: add v18.4s, v1.4s, v6.4s -; CHECK-NEXT: add v19.4s, v2.4s, v7.4s -; CHECK-NEXT: sub v2.4s, v2.4s, v7.4s -; CHECK-NEXT: sub v1.4s, v1.4s, v6.4s +; CHECK-NEXT: add v18.4s, v2.4s, v6.4s +; CHECK-NEXT: sub v1.4s, v1.4s, v4.4s +; CHECK-NEXT: sub v2.4s, v2.4s, v6.4s ; CHECK-NEXT: sub v3.4s, v3.4s, v5.4s -; CHECK-NEXT: sub v0.4s, v0.4s, v4.4s -; CHECK-NEXT: ext v4.16b, v2.16b, v19.16b, #12 -; CHECK-NEXT: ext v5.16b, v1.16b, v18.16b, #12 -; CHECK-NEXT: ext v7.16b, v3.16b, v17.16b, #12 +; CHECK-NEXT: ext v4.16b, v18.16b, v2.16b, #4 +; CHECK-NEXT: ext v5.16b, v17.16b, v3.16b, #4 +; CHECK-NEXT: ext v6.16b, v16.16b, v1.16b, #4 +; CHECK-NEXT: add v16.4s, v0.4s, v7.4s +; CHECK-NEXT: ext v17.16b, v4.16b, v4.16b, #12 +; CHECK-NEXT: ext v18.16b, v5.16b, v5.16b, #12 +; CHECK-NEXT: ext v19.16b, v6.16b, v6.16b, #12 ; CHECK-NEXT: rev64 v16.4s, v16.4s -; CHECK-NEXT: ext v6.16b, v4.16b, v2.16b, #4 -; CHECK-NEXT: ext v17.16b, v4.16b, v4.16b, #8 -; CHECK-NEXT: ext v18.16b, v5.16b, v1.16b, #4 -; CHECK-NEXT: ext v19.16b, v5.16b, v5.16b, #8 -; CHECK-NEXT: ext v20.16b, v7.16b, v3.16b, #4 -; CHECK-NEXT: ext v21.16b, v7.16b, v7.16b, #8 -; CHECK-NEXT: rev64 v7.4s, v7.4s +; CHECK-NEXT: sub v0.4s, v0.4s, v7.4s +; CHECK-NEXT: ext v7.16b, v2.16b, v17.16b, #12 +; CHECK-NEXT: ext v17.16b, v3.16b, v18.16b, #12 +; CHECK-NEXT: ext v18.16b, v1.16b, v19.16b, #12 ; CHECK-NEXT: trn2 v0.4s, v16.4s, v0.4s +; CHECK-NEXT: rev64 v6.4s, v6.4s ; CHECK-NEXT: rev64 v5.4s, v5.4s ; CHECK-NEXT: rev64 v4.4s, v4.4s -; CHECK-NEXT: ext v6.16b, v6.16b, v17.16b, #12 -; CHECK-NEXT: ext v17.16b, v18.16b, v19.16b, #12 -; CHECK-NEXT: ext v18.16b, v20.16b, v21.16b, #12 -; CHECK-NEXT: ext v3.16b, v7.16b, v3.16b, #4 -; CHECK-NEXT: ext v7.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: ext v1.16b, v5.16b, v1.16b, #4 -; CHECK-NEXT: ext v2.16b, v4.16b, v2.16b, #4 -; CHECK-NEXT: add v4.4s, v18.4s, v3.4s -; CHECK-NEXT: add v5.4s, v0.4s, v7.4s -; CHECK-NEXT: add v16.4s, v17.4s, v1.4s -; CHECK-NEXT: add v19.4s, v6.4s, v2.4s -; CHECK-NEXT: sub v3.4s, v18.4s, v3.4s -; CHECK-NEXT: sub v0.4s, v0.4s, v7.4s -; 
CHECK-NEXT: sub v2.4s, v6.4s, v2.4s -; CHECK-NEXT: sub v1.4s, v17.4s, v1.4s +; CHECK-NEXT: ext v7.16b, v7.16b, v7.16b, #4 +; CHECK-NEXT: ext v17.16b, v17.16b, v17.16b, #4 +; CHECK-NEXT: ext v18.16b, v18.16b, v18.16b, #4 +; CHECK-NEXT: ext v16.16b, v0.16b, v0.16b, #8 +; CHECK-NEXT: ext v1.16b, v1.16b, v6.16b, #12 +; CHECK-NEXT: ext v3.16b, v3.16b, v5.16b, #12 +; CHECK-NEXT: ext v2.16b, v2.16b, v4.16b, #12 +; CHECK-NEXT: add v4.4s, v0.4s, v16.4s +; CHECK-NEXT: add v5.4s, v18.4s, v1.4s +; CHECK-NEXT: add v6.4s, v17.4s, v3.4s +; CHECK-NEXT: add v19.4s, v7.4s, v2.4s +; CHECK-NEXT: sub v0.4s, v0.4s, v16.4s +; CHECK-NEXT: sub v1.4s, v18.4s, v1.4s +; CHECK-NEXT: sub v2.4s, v7.4s, v2.4s +; CHECK-NEXT: sub v3.4s, v17.4s, v3.4s ; CHECK-NEXT: mov v19.d[1], v2.d[1] -; CHECK-NEXT: mov v16.d[1], v1.d[1] -; CHECK-NEXT: mov v4.d[1], v3.d[1] -; CHECK-NEXT: mov v5.d[1], v0.d[1] +; CHECK-NEXT: mov v6.d[1], v3.d[1] +; CHECK-NEXT: mov v4.d[1], v0.d[1] +; CHECK-NEXT: mov v5.d[1], v1.d[1] ; CHECK-NEXT: movi v0.8h, #1 -; CHECK-NEXT: movi v7.2d, #0x00ffff0000ffff +; CHECK-NEXT: movi v16.2d, #0x00ffff0000ffff ; CHECK-NEXT: ushr v1.4s, v4.4s, #15 ; CHECK-NEXT: ushr v2.4s, v19.4s, #15 ; CHECK-NEXT: ushr v3.4s, v5.4s, #15 -; CHECK-NEXT: ushr v6.4s, v16.4s, #15 +; CHECK-NEXT: ushr v7.4s, v6.4s, #15 ; CHECK-NEXT: and v2.16b, v2.16b, v0.16b -; CHECK-NEXT: and v6.16b, v6.16b, v0.16b +; CHECK-NEXT: and v7.16b, v7.16b, v0.16b ; CHECK-NEXT: and v3.16b, v3.16b, v0.16b ; CHECK-NEXT: and v0.16b, v1.16b, v0.16b -; CHECK-NEXT: mul v1.4s, v2.4s, v7.4s -; CHECK-NEXT: mul v2.4s, v6.4s, v7.4s -; CHECK-NEXT: mul v0.4s, v0.4s, v7.4s -; CHECK-NEXT: mul v3.4s, v3.4s, v7.4s -; CHECK-NEXT: add v6.4s, v1.4s, v19.4s -; CHECK-NEXT: add v7.4s, v2.4s, v16.4s +; CHECK-NEXT: mul v1.4s, v2.4s, v16.4s +; CHECK-NEXT: mul v2.4s, v7.4s, v16.4s +; CHECK-NEXT: mul v0.4s, v0.4s, v16.4s +; CHECK-NEXT: mul v3.4s, v3.4s, v16.4s +; CHECK-NEXT: add v7.4s, v1.4s, v19.4s +; CHECK-NEXT: add v6.4s, v2.4s, v6.4s ; CHECK-NEXT: add v4.4s, v0.4s, v4.4s ; CHECK-NEXT: add v5.4s, v3.4s, v5.4s ; CHECK-NEXT: eor v0.16b, v4.16b, v0.16b ; CHECK-NEXT: eor v3.16b, v5.16b, v3.16b -; CHECK-NEXT: eor v2.16b, v7.16b, v2.16b -; CHECK-NEXT: eor v1.16b, v6.16b, v1.16b +; CHECK-NEXT: eor v2.16b, v6.16b, v2.16b +; CHECK-NEXT: eor v1.16b, v7.16b, v1.16b ; CHECK-NEXT: add v1.4s, v1.4s, v2.4s -; CHECK-NEXT: add v0.4s, v3.4s, v0.4s +; CHECK-NEXT: add v0.4s, v0.4s, v3.4s ; CHECK-NEXT: add v0.4s, v0.4s, v1.4s ; CHECK-NEXT: addv s0, v0.4s ; CHECK-NEXT: fmov w8, s0 diff --git a/llvm/test/CodeGen/Thumb2/mve-vst3.ll b/llvm/test/CodeGen/Thumb2/mve-vst3.ll --- a/llvm/test/CodeGen/Thumb2/mve-vst3.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vst3.ll @@ -41,26 +41,18 @@ define void @vst3_v4i32(<4 x i32> *%src, <12 x i32> *%dst) { ; CHECK-LABEL: vst3_v4i32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-NEXT: vldrw.u32 q3, [r0] ; CHECK-NEXT: vldrw.u32 q0, [r0, #32] -; CHECK-NEXT: vmov.f32 s8, s5 -; CHECK-NEXT: vmov.f32 s9, s1 -; CHECK-NEXT: vmov.f32 s18, s0 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov r0, r2, d0 ; CHECK-NEXT: vmov.f32 s0, s2 -; CHECK-NEXT: vmov.f32 s11, s6 -; CHECK-NEXT: vmov.f32 s10, s14 -; CHECK-NEXT: vmov.f32 s16, s12 -; CHECK-NEXT: vstrw.32 q2, [r1, #16] -; CHECK-NEXT: vmov.f32 s17, s4 -; CHECK-NEXT: vmov.f32 s19, s13 -; CHECK-NEXT: vmov.f32 s1, s15 -; CHECK-NEXT: vstrw.32 q4, [r1] -; CHECK-NEXT: vmov.f32 s2, s7 +; CHECK-NEXT: vmov.f32 s1, s5 +; CHECK-NEXT: vmov.f32 s2, s6 ; CHECK-NEXT: 
vstrw.32 q0, [r1, #32] -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmov q0, q1 +; CHECK-NEXT: vmov.32 q0[1], r2 +; CHECK-NEXT: vmov.32 q1[2], r0 +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vstrw.32 q1, [r1] ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0 @@ -79,49 +71,36 @@ define void @vst3_v8i32(<8 x i32> *%src, <24 x i32> *%dst) { ; CHECK-LABEL: vst3_v8i32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: .pad #16 -; CHECK-NEXT: sub sp, #16 -; CHECK-NEXT: vldrw.u32 q7, [r0] -; CHECK-NEXT: vldrw.u32 q0, [r0, #80] -; CHECK-NEXT: vldrw.u32 q4, [r0, #16] -; CHECK-NEXT: vldrw.u32 q3, [r0, #48] -; CHECK-NEXT: vstrw.32 q7, [sp] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q6, [r0, #32] +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vldrw.u32 q1, [r0, #64] -; CHECK-NEXT: vmov.f32 s8, s2 -; CHECK-NEXT: vmov.f32 s20, s28 -; CHECK-NEXT: vmov.f32 s9, s19 -; CHECK-NEXT: vmov.f32 s28, s16 -; CHECK-NEXT: vmov.f32 s31, s17 -; CHECK-NEXT: vmov.f32 s2, s18 -; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s11, s3 -; CHECK-NEXT: vmov.f32 s10, s15 -; CHECK-NEXT: vmov.f32 s23, s29 -; CHECK-NEXT: vstrw.32 q2, [r1, #80] -; CHECK-NEXT: vmov.f32 s22, s4 -; CHECK-NEXT: vmov.f32 s21, s24 -; CHECK-NEXT: vmov.f32 s29, s12 -; CHECK-NEXT: vstrw.32 q5, [r1] -; CHECK-NEXT: vmov.f32 s30, s0 -; CHECK-NEXT: vmov.f32 s0, s13 -; CHECK-NEXT: vstrw.32 q7, [r1, #48] -; CHECK-NEXT: vmov.f32 s3, s14 -; CHECK-NEXT: vmov.f32 s13, s5 -; CHECK-NEXT: vstrw.32 q0, [r1, #64] +; CHECK-NEXT: vldrw.u32 q3, [r0, #32] +; CHECK-NEXT: vldrw.u32 q0, [r0, #80] +; CHECK-NEXT: vldrw.u32 q2, [r0, #48] +; CHECK-NEXT: vmov r12, r3, d2 ; CHECK-NEXT: vmov.f32 s4, s6 -; CHECK-NEXT: vmov.f32 s12, s25 -; CHECK-NEXT: vmov.f32 s15, s26 -; CHECK-NEXT: vmov.f32 s14, s18 -; CHECK-NEXT: vmov.f32 s5, s19 -; CHECK-NEXT: vstrw.32 q3, [r1, #16] -; CHECK-NEXT: vmov.f32 s6, s27 +; CHECK-NEXT: vmov q4, q2 +; CHECK-NEXT: vmov.f32 s5, s13 +; CHECK-NEXT: vmov.32 q4[1], r3 +; CHECK-NEXT: vmov.f32 s6, s14 +; CHECK-NEXT: vstrw.32 q4, [r1, #16] +; CHECK-NEXT: vmov r2, lr, d0 ; CHECK-NEXT: vstrw.32 q1, [r1, #32] -; CHECK-NEXT: add sp, #16 -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: bx lr +; CHECK-NEXT: vmov q1, q3 +; CHECK-NEXT: vmov.f32 s0, s2 +; CHECK-NEXT: vmov.32 q1[1], lr +; CHECK-NEXT: vmov.32 q3[2], r12 +; CHECK-NEXT: vmov.f32 s1, s9 +; CHECK-NEXT: vstrw.32 q1, [r1, #64] +; CHECK-NEXT: vmov.f32 s2, s10 +; CHECK-NEXT: vmov.32 q2[2], r2 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vstrw.32 q0, [r1, #80] +; CHECK-NEXT: vstrw.32 q3, [r1] +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r7, pc} entry: %s1 = getelementptr <8 x i32>, <8 x i32>* %src, i32 0 %l1 = load <8 x i32>, <8 x i32>* %s1, align 4 @@ -139,110 +118,69 @@ define void @vst3_v16i32(<16 x i32> *%src, <48 x i32> *%dst) { ; CHECK-LABEL: vst3_v16i32: ; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: .pad #144 -; CHECK-NEXT: sub sp, #144 -; CHECK-NEXT: vldrw.u32 q7, [r0, #96] -; CHECK-NEXT: vldrw.u32 q3, [r0, #160] -; CHECK-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-NEXT: vldrw.u32 q2, [r0, #128] -; CHECK-NEXT: vstrw.32 q7, [sp] @ 16-byte 
Spill -; CHECK-NEXT: vldrw.u32 q7, [r0, #80] -; CHECK-NEXT: vstrw.32 q3, [sp, #128] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q3, [r0, #112] -; CHECK-NEXT: vstrw.32 q7, [sp, #80] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q7, [r0, #32] -; CHECK-NEXT: vldrw.u32 q6, [r0] -; CHECK-NEXT: vstrw.32 q3, [sp, #64] @ 16-byte Spill -; CHECK-NEXT: vstrw.32 q7, [sp, #16] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q7, [r0, #16] -; CHECK-NEXT: vmov.f32 s16, s1 -; CHECK-NEXT: vldrw.u32 q1, [r0, #176] -; CHECK-NEXT: vmov.f32 s19, s2 -; CHECK-NEXT: vstrw.32 q7, [sp, #48] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s17, s9 -; CHECK-NEXT: vldrw.u32 q3, [r0, #48] -; CHECK-NEXT: vmov.f32 s18, s26 -; CHECK-NEXT: vldrw.u32 q7, [sp, #64] @ 16-byte Reload -; CHECK-NEXT: vldrw.u32 q5, [r0, #144] +; CHECK-NEXT: .pad #32 +; CHECK-NEXT: sub sp, #32 +; CHECK-NEXT: vldrw.u32 q1, [r0, #64] +; CHECK-NEXT: vldrw.u32 q7, [r0, #128] +; CHECK-NEXT: vldrw.u32 q2, [r0, #160] +; CHECK-NEXT: vldrw.u32 q3, [r0, #96] +; CHECK-NEXT: vstrw.32 q1, [sp, #16] @ 16-byte Spill +; CHECK-NEXT: vldrw.u32 q1, [r0, #80] +; CHECK-NEXT: vmov lr, r5, d14 +; CHECK-NEXT: vldrw.u32 q6, [r0, #144] +; CHECK-NEXT: vmov q4, q1 +; CHECK-NEXT: vmov r4, r3, d4 +; CHECK-NEXT: vmov.32 q4[1], r5 +; CHECK-NEXT: vldrw.u32 q5, [r0, #112] +; CHECK-NEXT: vstrw.32 q4, [sp] @ 16-byte Spill +; CHECK-NEXT: vldrw.u32 q4, [sp, #16] @ 16-byte Reload +; CHECK-NEXT: vmov.f32 s8, s10 +; CHECK-NEXT: vldrw.u32 q0, [r0, #176] +; CHECK-NEXT: vmov.f32 s9, s17 +; CHECK-NEXT: vmov.f32 s10, s18 +; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload +; CHECK-NEXT: vmov.f32 s28, s30 +; CHECK-NEXT: vstrw.32 q2, [r1, #128] +; CHECK-NEXT: vmov.f32 s29, s13 ; CHECK-NEXT: vstrw.32 q4, [r1, #16] -; CHECK-NEXT: vmov.f32 s18, s3 -; CHECK-NEXT: vmov.f32 s16, s10 -; CHECK-NEXT: vmov.f32 s17, s27 -; CHECK-NEXT: vmov.f32 s19, s11 -; CHECK-NEXT: vstrw.32 q4, [r1, #32] -; CHECK-NEXT: vmov.f32 s16, s6 -; CHECK-NEXT: vmov.f32 s19, s7 -; CHECK-NEXT: vmov.f32 s17, s15 -; CHECK-NEXT: vmov.f32 s18, s31 -; CHECK-NEXT: vstrw.32 q4, [sp, #112] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s18, s8 -; CHECK-NEXT: vmov.f64 d4, d14 -; CHECK-NEXT: vmov.f32 s2, s4 -; CHECK-NEXT: vmov.f32 s1, s8 -; CHECK-NEXT: vmov.f32 s4, s9 -; CHECK-NEXT: vldrw.u32 q2, [sp, #16] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s17, s0 -; CHECK-NEXT: vmov.f32 s3, s13 -; CHECK-NEXT: vmov.f32 s0, s12 -; CHECK-NEXT: vmov.f64 d14, d4 -; CHECK-NEXT: vstrw.32 q0, [sp, #64] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q0, [sp, #128] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s16, s24 -; CHECK-NEXT: vmov.f32 s19, s25 -; CHECK-NEXT: vstrw.32 q4, [sp, #96] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s17, s1 -; CHECK-NEXT: vmov.f32 s12, s2 -; CHECK-NEXT: vmov.f32 s15, s3 -; CHECK-NEXT: vmov q0, q5 -; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill -; CHECK-NEXT: vmov.f64 d0, d14 -; CHECK-NEXT: vldrw.u32 q5, [sp, #48] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s6, s14 -; CHECK-NEXT: vmov.f32 s7, s30 -; CHECK-NEXT: vstrw.32 q1, [sp, #32] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q1, [sp] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s31, s1 -; CHECK-NEXT: vmov.f64 d0, d10 -; CHECK-NEXT: vmov.f32 s16, s5 -; CHECK-NEXT: vmov.f32 s19, s6 -; CHECK-NEXT: vmov.f32 s14, s7 -; CHECK-NEXT: vmov.f32 s29, s4 -; CHECK-NEXT: vldrw.u32 q1, [sp, #128] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s24, s2 -; CHECK-NEXT: vmov.f32 s30, s4 -; CHECK-NEXT: vmov.f32 s27, s3 -; CHECK-NEXT: vstrw.32 q7, [r1, #96] -; CHECK-NEXT: vmov.f32 s4, s0 -; CHECK-NEXT: vmov.f32 s7, s1 -; CHECK-NEXT: vldrw.u32 q0, [sp, 
#16] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s18, s10 -; CHECK-NEXT: vmov.f64 d10, d0 -; CHECK-NEXT: vldrw.u32 q0, [sp, #64] @ 16-byte Reload -; CHECK-NEXT: vstrw.32 q4, [r1, #112] -; CHECK-NEXT: vstrw.32 q0, [r1, #144] -; CHECK-NEXT: vldrw.u32 q0, [sp, #32] @ 16-byte Reload -; CHECK-NEXT: vstrw.32 q0, [r1, #160] -; CHECK-NEXT: vldrw.u32 q0, [sp, #112] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s13, s11 -; CHECK-NEXT: vldrw.u32 q2, [sp, #80] @ 16-byte Reload +; CHECK-NEXT: vmov.f32 s30, s14 +; CHECK-NEXT: vldrw.u32 q4, [sp, #16] @ 16-byte Reload +; CHECK-NEXT: vmov r5, r0, d12 +; CHECK-NEXT: vstrw.32 q7, [r1, #32] +; CHECK-NEXT: vmov q7, q5 +; CHECK-NEXT: vmov r2, r12, d0 +; CHECK-NEXT: vmov.f32 s0, s2 +; CHECK-NEXT: vmov.32 q7[2], r5 +; CHECK-NEXT: vmov.f32 s24, s26 +; CHECK-NEXT: vstrw.32 q7, [r1, #48] +; CHECK-NEXT: vmov.f32 s1, s21 +; CHECK-NEXT: vmov q7, q4 +; CHECK-NEXT: vmov.f32 s2, s22 +; CHECK-NEXT: vmov.32 q7[1], r0 +; CHECK-NEXT: vmov.f32 s25, s5 ; CHECK-NEXT: vstrw.32 q0, [r1, #176] -; CHECK-NEXT: vldrw.u32 q0, [sp, #96] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s25, s23 -; CHECK-NEXT: vstrw.32 q3, [r1, #128] -; CHECK-NEXT: vmov.f32 s26, s11 -; CHECK-NEXT: vstrw.32 q0, [r1] -; CHECK-NEXT: vmov.f32 s6, s20 +; CHECK-NEXT: vmov.f32 s26, s6 +; CHECK-NEXT: vmov q0, q4 ; CHECK-NEXT: vstrw.32 q6, [r1, #80] -; CHECK-NEXT: vmov.f32 s5, s8 -; CHECK-NEXT: vmov.f32 s20, s9 -; CHECK-NEXT: vstrw.32 q1, [r1, #48] -; CHECK-NEXT: vmov.f32 s23, s10 -; CHECK-NEXT: vstrw.32 q5, [r1, #64] -; CHECK-NEXT: add sp, #144 +; CHECK-NEXT: vmov q6, q3 +; CHECK-NEXT: vmov.32 q6[2], r4 +; CHECK-NEXT: vmov.32 q5[1], r3 +; CHECK-NEXT: vmov.32 q1[2], r2 +; CHECK-NEXT: vmov.32 q3[1], r12 +; CHECK-NEXT: vmov.32 q0[2], lr +; CHECK-NEXT: vstrw.32 q7, [r1, #64] +; CHECK-NEXT: vstrw.32 q6, [r1, #96] +; CHECK-NEXT: vstrw.32 q5, [r1, #112] +; CHECK-NEXT: vstrw.32 q1, [r1, #144] +; CHECK-NEXT: vstrw.32 q3, [r1, #160] +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: bx lr +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %s1 = getelementptr <16 x i32>, <16 x i32>* %src, i32 0 %l1 = load <16 x i32>, <16 x i32>* %s1, align 4 @@ -340,52 +278,36 @@ define void @vst3_v8i16(<8 x i16> *%src, <24 x i16> *%dst) { ; CHECK-LABEL: vst3_v8i16: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vldrw.u32 q2, [r0] -; CHECK-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-NEXT: vmov.f32 s0, s8 -; CHECK-NEXT: vmov.u16 r2, q1[1] -; CHECK-NEXT: vins.f16 s0, s4 -; CHECK-NEXT: vmov.f32 s12, s9 -; CHECK-NEXT: vins.f16 s12, s5 -; CHECK-NEXT: vmov.16 q0[4], r2 -; CHECK-NEXT: vmov.f32 s3, s12 -; CHECK-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-NEXT: vmovx.f16 s8, s8 -; CHECK-NEXT: vmovx.f16 s16, s6 -; CHECK-NEXT: vmov.f32 s1, s12 -; CHECK-NEXT: vins.f16 s17, s7 -; CHECK-NEXT: vins.f16 s1, s8 -; CHECK-NEXT: vmovx.f16 s8, s12 -; CHECK-NEXT: vins.f16 s2, s8 -; CHECK-NEXT: vmovx.f16 s8, s14 -; CHECK-NEXT: vins.f16 s16, s8 -; CHECK-NEXT: vmovx.f16 s19, s7 -; CHECK-NEXT: vmovx.f16 s8, s15 -; CHECK-NEXT: vmov.f32 s18, s15 -; CHECK-NEXT: vins.f16 s19, s8 -; CHECK-NEXT: vmovx.f16 s8, s17 -; CHECK-NEXT: vmov.f32 s17, s11 -; CHECK-NEXT: vmovx.f16 s12, s9 -; CHECK-NEXT: vins.f16 s17, s8 -; CHECK-NEXT: vmovx.f16 s8, s11 -; CHECK-NEXT: vins.f16 s18, s8 -; CHECK-NEXT: vmov.f32 s8, s13 -; CHECK-NEXT: vins.f16 s8, s12 -; CHECK-NEXT: vmovx.f16 s12, s10 -; CHECK-NEXT: vins.f16 s14, s12 -; CHECK-NEXT: vrev32.16 q1, q1 -; CHECK-NEXT: vmovx.f16 s12, s13 -; 
CHECK-NEXT: vmovx.f16 s4, s6 -; CHECK-NEXT: vins.f16 s5, s12 -; CHECK-NEXT: vmov.f32 s11, s14 -; CHECK-NEXT: vins.f16 s10, s4 -; CHECK-NEXT: vmov.f32 s9, s5 -; CHECK-NEXT: vstrw.32 q4, [r1, #32] -; CHECK-NEXT: vstrw.32 q2, [r1, #16] +; CHECK-NEXT: vldrw.u32 q0, [r0, #16] +; CHECK-NEXT: vldrw.u32 q2, [r0, #32] +; CHECK-NEXT: vmovx.f16 s6, s9 +; CHECK-NEXT: vmov.f32 s5, s1 +; CHECK-NEXT: vins.f16 s5, s6 +; CHECK-NEXT: vmovx.f16 s6, s3 +; CHECK-NEXT: vmovx.f16 s4, s0 +; CHECK-NEXT: vmovx.f16 s14, s10 +; CHECK-NEXT: vins.f16 s10, s6 +; CHECK-NEXT: vins.f16 s9, s4 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmovx.f16 s10, s2 +; CHECK-NEXT: vmov.f32 s4, s9 +; CHECK-NEXT: vmovx.f16 s9, s11 +; CHECK-NEXT: vins.f16 s11, s10 +; CHECK-NEXT: vmovx.f16 s10, s1 +; CHECK-NEXT: vmov.f32 s12, s0 +; CHECK-NEXT: vmov.f32 s15, s3 +; CHECK-NEXT: vins.f16 s12, s14 +; CHECK-NEXT: vmov.f32 s13, s1 +; CHECK-NEXT: vmovx.f16 s1, s8 +; CHECK-NEXT: vins.f16 s8, s10 +; CHECK-NEXT: vmov.f32 s6, s2 +; CHECK-NEXT: vins.f16 s2, s1 +; CHECK-NEXT: vmov.f32 s1, s8 +; CHECK-NEXT: vmov.f32 s14, s11 +; CHECK-NEXT: vins.f16 s15, s9 +; CHECK-NEXT: vstrw.32 q3, [r1, #32] +; CHECK-NEXT: vstrw.32 q1, [r1, #16] ; CHECK-NEXT: vstrw.32 q0, [r1] -; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0 @@ -406,112 +328,66 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: .pad #64 -; CHECK-NEXT: sub sp, #64 -; CHECK-NEXT: vldrw.u32 q2, [r0, #80] -; CHECK-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-NEXT: vldrw.u32 q6, [r0] -; CHECK-NEXT: vstrw.32 q1, [sp, #48] @ 16-byte Spill -; CHECK-NEXT: vmovx.f16 s0, s10 -; CHECK-NEXT: vmovx.f16 s4, s6 -; CHECK-NEXT: vins.f16 s1, s7 -; CHECK-NEXT: vins.f16 s4, s0 -; CHECK-NEXT: vmovx.f16 s0, s11 -; CHECK-NEXT: vmovx.f16 s7, s7 -; CHECK-NEXT: vmov.f32 s12, s4 -; CHECK-NEXT: vins.f16 s7, s0 -; CHECK-NEXT: vmovx.f16 s4, s1 -; CHECK-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-NEXT: vmov.f32 s18, s11 -; CHECK-NEXT: vmov.f32 s15, s7 -; CHECK-NEXT: vstrw.32 q6, [sp] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s13, s3 -; CHECK-NEXT: vins.f16 s13, s4 -; CHECK-NEXT: vmovx.f16 s4, s3 -; CHECK-NEXT: vins.f16 s18, s4 -; CHECK-NEXT: vldrw.u32 q1, [r0, #32] -; CHECK-NEXT: vmov.f32 s20, s24 -; CHECK-NEXT: vins.f16 s20, s4 -; CHECK-NEXT: vmov.u16 r2, q1[1] -; CHECK-NEXT: vmov.16 q5[4], r2 -; CHECK-NEXT: vstrw.32 q1, [sp, #32] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s23, s25 -; CHECK-NEXT: vmovx.f16 s4, s24 -; CHECK-NEXT: vldrw.u32 q6, [r0, #64] -; CHECK-NEXT: vmov.f32 s14, s18 -; CHECK-NEXT: vins.f16 s23, s5 -; CHECK-NEXT: vstrw.32 q3, [sp, #16] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s5, s24 -; CHECK-NEXT: vldrw.u32 q3, [sp, #48] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s28, s0 -; CHECK-NEXT: vins.f16 s5, s4 -; CHECK-NEXT: vmovx.f16 s4, s24 -; CHECK-NEXT: vmov.u16 r0, q3[1] -; CHECK-NEXT: vins.f16 s28, s12 -; CHECK-NEXT: vins.f16 s22, s4 -; CHECK-NEXT: vmov.f32 s4, s1 -; CHECK-NEXT: vmov.16 q7[4], r0 -; CHECK-NEXT: vins.f16 s4, s13 -; CHECK-NEXT: vmov.f32 s21, s5 -; CHECK-NEXT: vmov.f32 s31, s4 -; CHECK-NEXT: vldrw.u32 q1, [sp, #32] @ 16-byte Reload -; CHECK-NEXT: vmovx.f16 s0, s0 -; CHECK-NEXT: vmov.f32 s29, s8 -; CHECK-NEXT: vins.f16 s29, s0 -; CHECK-NEXT: vmovx.f16 s0, s8 -; CHECK-NEXT: vins.f16 s30, s0 -; CHECK-NEXT: vmovx.f16 s4, s6 -; CHECK-NEXT: vmovx.f16 s0, s26 -; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload -; CHECK-NEXT: vins.f16 s4, s0 -; CHECK-NEXT: 
vins.f16 s5, s7 -; CHECK-NEXT: vmovx.f16 s7, s7 -; CHECK-NEXT: vmovx.f16 s0, s27 -; CHECK-NEXT: vins.f16 s7, s0 -; CHECK-NEXT: vmovx.f16 s0, s5 -; CHECK-NEXT: vmov.f32 s13, s19 -; CHECK-NEXT: vmovx.f16 s6, s1 -; CHECK-NEXT: vins.f16 s13, s0 -; CHECK-NEXT: vmov.f32 s14, s27 -; CHECK-NEXT: vmovx.f16 s0, s19 -; CHECK-NEXT: vmov.f32 s12, s25 -; CHECK-NEXT: vins.f16 s14, s0 -; CHECK-NEXT: vmov.f32 s0, s9 -; CHECK-NEXT: vins.f16 s0, s6 -; CHECK-NEXT: vmovx.f16 s6, s2 -; CHECK-NEXT: vins.f16 s10, s6 -; CHECK-NEXT: vmovx.f16 s6, s9 -; CHECK-NEXT: vmov.f32 s3, s10 -; CHECK-NEXT: vldrw.u32 q2, [sp, #48] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s5, s13 -; CHECK-NEXT: vstrw.32 q7, [r1, #48] -; CHECK-NEXT: vrev32.16 q2, q2 -; CHECK-NEXT: vstrw.32 q5, [r1] -; CHECK-NEXT: vmovx.f16 s8, s17 -; CHECK-NEXT: vins.f16 s9, s6 -; CHECK-NEXT: vmovx.f16 s6, s10 -; CHECK-NEXT: vins.f16 s12, s8 -; CHECK-NEXT: vmovx.f16 s8, s18 -; CHECK-NEXT: vmov.f32 s10, s18 -; CHECK-NEXT: vldrw.u32 q4, [sp, #32] @ 16-byte Reload -; CHECK-NEXT: vins.f16 s26, s8 -; CHECK-NEXT: vmov.f32 s15, s26 -; CHECK-NEXT: vmovx.f16 s8, s25 -; CHECK-NEXT: vrev32.16 q6, q4 -; CHECK-NEXT: vins.f16 s2, s6 -; CHECK-NEXT: vins.f16 s25, s8 -; CHECK-NEXT: vmov.f32 s1, s9 -; CHECK-NEXT: vmovx.f16 s8, s26 -; CHECK-NEXT: vstrw.32 q0, [r1, #64] -; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload -; CHECK-NEXT: vins.f16 s10, s8 -; CHECK-NEXT: vmov.f32 s6, s14 -; CHECK-NEXT: vmov.f32 s14, s10 +; CHECK-NEXT: vldrw.u32 q0, [r0, #32] +; CHECK-NEXT: vldrw.u32 q5, [r0, #64] +; CHECK-NEXT: vldrw.u32 q4, [r0, #80] +; CHECK-NEXT: vmovx.f16 s6, s22 +; CHECK-NEXT: vmov.f32 s4, s0 +; CHECK-NEXT: vins.f16 s4, s6 +; CHECK-NEXT: vmovx.f16 s8, s2 +; CHECK-NEXT: vmov.f32 s6, s23 +; CHECK-NEXT: vmovx.f16 s5, s19 +; CHECK-NEXT: vins.f16 s6, s8 +; CHECK-NEXT: vldrw.u32 q2, [r0, #48] +; CHECK-NEXT: vmovx.f16 s7, s17 +; CHECK-NEXT: vmov.f32 s25, s1 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmovx.f16 s14, s18 +; CHECK-NEXT: vins.f16 s15, s5 +; CHECK-NEXT: vmovx.f16 s5, s0 +; CHECK-NEXT: vins.f16 s17, s5 +; CHECK-NEXT: vmovx.f16 s5, s3 +; CHECK-NEXT: vins.f16 s18, s5 +; CHECK-NEXT: vmovx.f16 s5, s8 +; CHECK-NEXT: vins.f16 s25, s7 +; CHECK-NEXT: vmovx.f16 s7, s21 +; CHECK-NEXT: vins.f16 s21, s5 +; CHECK-NEXT: vmov.f32 s29, s9 +; CHECK-NEXT: vmovx.f16 s5, s11 +; CHECK-NEXT: vmov.f32 s12, s8 +; CHECK-NEXT: vins.f16 s22, s5 +; CHECK-NEXT: vins.f16 s29, s7 +; CHECK-NEXT: vmovx.f16 s5, s23 +; CHECK-NEXT: vmov.f32 s7, s3 +; CHECK-NEXT: vins.f16 s12, s14 +; CHECK-NEXT: vmovx.f16 s14, s10 +; CHECK-NEXT: vmov.f32 s27, s18 +; CHECK-NEXT: vins.f16 s7, s5 +; CHECK-NEXT: vmov.f32 s5, s1 +; CHECK-NEXT: vmovx.f16 s1, s1 +; CHECK-NEXT: vmov.f32 s13, s9 +; CHECK-NEXT: vmovx.f16 s18, s20 +; CHECK-NEXT: vmovx.f16 s9, s9 +; CHECK-NEXT: vins.f16 s19, s14 +; CHECK-NEXT: vins.f16 s20, s1 +; CHECK-NEXT: vmov.f32 s26, s2 +; CHECK-NEXT: vins.f16 s2, s18 +; CHECK-NEXT: vmovx.f16 s18, s16 +; CHECK-NEXT: vins.f16 s16, s9 +; CHECK-NEXT: vmov.f32 s14, s19 +; CHECK-NEXT: vmov.f32 s24, s17 ; CHECK-NEXT: vstrw.32 q1, [r1, #32] -; CHECK-NEXT: vmov.f32 s13, s25 -; CHECK-NEXT: vstrw.32 q0, [r1, #80] -; CHECK-NEXT: vstrw.32 q3, [r1, #16] -; CHECK-NEXT: add sp, #64 +; CHECK-NEXT: vmov.f32 s30, s10 +; CHECK-NEXT: vins.f16 s10, s18 +; CHECK-NEXT: vmov.f32 s1, s20 +; CHECK-NEXT: vstrw.32 q6, [r1, #64] +; CHECK-NEXT: vmov.f32 s9, s16 +; CHECK-NEXT: vstrw.32 q3, [r1, #80] +; CHECK-NEXT: vmov.f32 s28, s21 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vmov.f32 s31, s22 +; CHECK-NEXT: vstrw.32 q0, [r1] +; 
CHECK-NEXT: vstrw.32 q7, [r1, #16] ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bx lr entry: @@ -695,205 +571,94 @@ define void @vst3_v16i8(<16 x i8> *%src, <48 x i8> *%dst) { ; CHECK-LABEL: vst3_v16i8: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: vldrw.u32 q3, [r0] -; CHECK-NEXT: vldrw.u32 q2, [r0, #16] -; CHECK-NEXT: vldrw.u32 q1, [r0, #32] -; CHECK-NEXT: vmov.u8 r3, q3[0] +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: vldrw.u32 q3, [r0, #16] +; CHECK-NEXT: vldrw.u32 q2, [r0, #32] +; CHECK-NEXT: vmov.u8 r2, q3[0] +; CHECK-NEXT: vmov.u8 r12, q3[1] +; CHECK-NEXT: vmov.8 q0[0], r2 ; CHECK-NEXT: vmov.u8 r0, q2[0] -; CHECK-NEXT: vmov.8 q5[0], r3 -; CHECK-NEXT: vmov.u8 r2, q1[0] -; CHECK-NEXT: vmov.8 q5[1], r0 -; CHECK-NEXT: vmov.u8 r0, q3[1] -; CHECK-NEXT: vmov.8 q5[3], r0 -; CHECK-NEXT: vmov.u8 r0, q2[1] -; CHECK-NEXT: vmov.8 q5[4], r0 -; CHECK-NEXT: vmov.u8 r0, q3[2] -; CHECK-NEXT: vmov.8 q5[6], r0 -; CHECK-NEXT: vmov.u8 r0, q2[2] -; CHECK-NEXT: vmov.8 q5[7], r0 -; CHECK-NEXT: vmov.u8 r0, q3[3] -; CHECK-NEXT: vmov.8 q5[9], r0 -; CHECK-NEXT: vmov.u8 r0, q2[3] -; CHECK-NEXT: vmov.8 q5[10], r0 -; CHECK-NEXT: vmov.u8 r0, q3[4] -; CHECK-NEXT: vmov.8 q4[2], r2 -; CHECK-NEXT: vmov.u8 r2, q1[2] -; CHECK-NEXT: vmov.8 q5[12], r0 -; CHECK-NEXT: vmov.u8 r0, q2[4] -; CHECK-NEXT: vmov.8 q4[8], r2 -; CHECK-NEXT: vmov.u8 r2, q1[3] -; CHECK-NEXT: vmov.8 q5[13], r0 -; CHECK-NEXT: vmov.u8 r0, q3[5] -; CHECK-NEXT: vmov.8 q5[15], r0 -; CHECK-NEXT: vmov.8 q4[11], r2 -; CHECK-NEXT: vmov.u8 r2, q1[4] -; CHECK-NEXT: vmov.u8 r0, q5[0] -; CHECK-NEXT: vmov.8 q4[14], r2 -; CHECK-NEXT: vmov.8 q0[0], r0 -; CHECK-NEXT: vmov.f32 s17, s4 -; CHECK-NEXT: vmov.u8 r0, q5[1] -; CHECK-NEXT: vmov.8 q0[1], r0 -; CHECK-NEXT: vmov.u8 r2, q4[2] -; CHECK-NEXT: vmov.8 q0[2], r2 -; CHECK-NEXT: vmov.u8 r0, q5[3] -; CHECK-NEXT: vmov.8 q0[3], r0 -; CHECK-NEXT: vmov.u8 r0, q5[4] -; CHECK-NEXT: vmov.8 q0[4], r0 -; CHECK-NEXT: vmov.u8 r0, q4[5] -; CHECK-NEXT: vmov.8 q0[5], r0 -; CHECK-NEXT: vmov.u8 r0, q5[6] -; CHECK-NEXT: vmov.8 q0[6], r0 -; CHECK-NEXT: vmov.u8 r0, q5[7] -; CHECK-NEXT: vmov.8 q0[7], r0 -; CHECK-NEXT: vmov.u8 r0, q4[8] -; CHECK-NEXT: vmov.8 q0[8], r0 -; CHECK-NEXT: vmov.u8 r0, q5[9] -; CHECK-NEXT: vmov.8 q0[9], r0 -; CHECK-NEXT: vmov.u8 r0, q5[10] -; CHECK-NEXT: vmov.8 q0[10], r0 -; CHECK-NEXT: vmov.u8 r0, q4[11] -; CHECK-NEXT: vmov.8 q0[11], r0 -; CHECK-NEXT: vmov.u8 r0, q5[12] -; CHECK-NEXT: vmov.8 q0[12], r0 -; CHECK-NEXT: vmov.u8 r0, q5[13] -; CHECK-NEXT: vmov.8 q0[13], r0 -; CHECK-NEXT: vmov.u8 r0, q4[14] -; CHECK-NEXT: vmov.8 q0[14], r0 -; CHECK-NEXT: vmov.u8 r0, q5[15] -; CHECK-NEXT: vmov.8 q0[15], r0 -; CHECK-NEXT: vmov.u8 r0, q2[5] -; CHECK-NEXT: vmov.8 q5[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.8 q5[1], r0 -; CHECK-NEXT: vmov.u8 r0, q2[6] -; CHECK-NEXT: vmov.8 q5[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: vmov.8 q5[4], r0 -; CHECK-NEXT: vmov.u8 r0, q2[7] -; CHECK-NEXT: vmov.8 q5[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.8 q5[7], r0 -; CHECK-NEXT: vmov.u8 r0, q2[8] -; CHECK-NEXT: vmov.8 q5[9], r0 -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmov.8 q5[10], r0 -; CHECK-NEXT: vmov.u8 r0, q2[9] -; CHECK-NEXT: vmov.8 q5[12], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.8 q5[13], r0 -; CHECK-NEXT: vmov.u8 r0, q2[10] -; CHECK-NEXT: vmov.8 q5[15], r0 -; CHECK-NEXT: 
vstrw.32 q0, [r1] -; CHECK-NEXT: vmov.u8 r0, q5[0] -; CHECK-NEXT: vmov.8 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q5[1] -; CHECK-NEXT: vmov.8 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q3[7] -; CHECK-NEXT: vmov.8 q6[5], r0 -; CHECK-NEXT: vmov.u8 r0, q3[8] -; CHECK-NEXT: vmov.8 q6[8], r0 -; CHECK-NEXT: vmov.u8 r0, q3[9] -; CHECK-NEXT: vmov.8 q6[11], r0 -; CHECK-NEXT: vmov.f32 s24, s13 -; CHECK-NEXT: vmov.f32 s27, s14 -; CHECK-NEXT: vmov.u8 r0, q6[2] -; CHECK-NEXT: vmov.8 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q5[3] -; CHECK-NEXT: vmov.8 q4[3], r0 -; CHECK-NEXT: vmov.u8 r0, q5[4] -; CHECK-NEXT: vmov.8 q4[4], r0 -; CHECK-NEXT: vmov.u8 r0, q6[5] -; CHECK-NEXT: vmov.8 q4[5], r0 -; CHECK-NEXT: vmov.u8 r0, q5[6] -; CHECK-NEXT: vmov.8 q4[6], r0 -; CHECK-NEXT: vmov.u8 r0, q5[7] -; CHECK-NEXT: vmov.8 q4[7], r0 -; CHECK-NEXT: vmov.u8 r0, q6[8] -; CHECK-NEXT: vmov.8 q4[8], r0 -; CHECK-NEXT: vmov.u8 r0, q5[9] -; CHECK-NEXT: vmov.8 q4[9], r0 -; CHECK-NEXT: vmov.u8 r0, q5[10] -; CHECK-NEXT: vmov.8 q4[10], r0 -; CHECK-NEXT: vmov.u8 r0, q6[11] -; CHECK-NEXT: vmov.8 q4[11], r0 -; CHECK-NEXT: vmov.u8 r0, q5[12] -; CHECK-NEXT: vmov.8 q4[12], r0 -; CHECK-NEXT: vmov.u8 r0, q5[13] -; CHECK-NEXT: vmov.8 q4[13], r0 -; CHECK-NEXT: vmov.u8 r0, q6[14] -; CHECK-NEXT: vmov.8 q4[14], r0 -; CHECK-NEXT: vmov.u8 r0, q5[15] -; CHECK-NEXT: vmov.8 q4[15], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.8 q5[0], r0 -; CHECK-NEXT: vmov.u8 r0, q3[11] -; CHECK-NEXT: vmov.8 q5[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.8 q5[3], r0 -; CHECK-NEXT: vmov.u8 r0, q3[12] -; CHECK-NEXT: vmov.8 q5[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.8 q5[6], r0 -; CHECK-NEXT: vmov.u8 r0, q3[13] -; CHECK-NEXT: vmov.8 q5[7], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.8 q5[9], r0 -; CHECK-NEXT: vmov.u8 r0, q3[14] -; CHECK-NEXT: vmov.8 q5[10], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.8 q5[12], r0 -; CHECK-NEXT: vmov.u8 r0, q3[15] -; CHECK-NEXT: vmov.8 q5[13], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.8 q5[15], r0 -; CHECK-NEXT: vstrw.32 q4, [r1, #16] -; CHECK-NEXT: vmov.u8 r0, q5[0] -; CHECK-NEXT: vmov.8 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q5[1] -; CHECK-NEXT: vmov.8 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q2[11] -; CHECK-NEXT: vmov.8 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q2[12] -; CHECK-NEXT: vmov.8 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q2[13] -; CHECK-NEXT: vmov.8 q3[8], r0 -; CHECK-NEXT: vmov.u8 r0, q2[14] -; CHECK-NEXT: vmov.8 q3[11], r0 -; CHECK-NEXT: vmov.u8 r0, q2[15] -; CHECK-NEXT: vmov.8 q3[14], r0 -; CHECK-NEXT: vmov.u8 r0, q3[2] +; CHECK-NEXT: vmov q1, q0 +; CHECK-NEXT: vmov.u8 r5, q3[3] +; CHECK-NEXT: vmov.8 q1[1], r12 +; CHECK-NEXT: vmov.u8 lr, q3[4] ; CHECK-NEXT: vmov.8 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q5[3] -; CHECK-NEXT: vmov.8 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q5[4] -; CHECK-NEXT: vmov.8 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q3[5] +; CHECK-NEXT: vmov.u8 r0, q2[1] +; CHECK-NEXT: vmov.8 q1[3], r5 +; CHECK-NEXT: vmov.u8 r6, q3[6] +; CHECK-NEXT: vmov.8 q1[4], lr +; CHECK-NEXT: vmov.u8 r8, q3[7] ; CHECK-NEXT: vmov.8 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q5[6] -; CHECK-NEXT: vmov.8 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q5[7] -; CHECK-NEXT: vmov.8 q1[7], r0 -; CHECK-NEXT: vmov.u8 r0, q3[8] +; CHECK-NEXT: vmov.u8 r0, q2[2] +; CHECK-NEXT: vmov.8 q1[6], r6 +; CHECK-NEXT: vmov.u8 r7, q3[9] +; CHECK-NEXT: vmov.8 q1[7], r8 +; CHECK-NEXT: vmov.u8 r9, q3[10] ; CHECK-NEXT: vmov.8 q1[8], r0 -; CHECK-NEXT: vmov.u8 r0, q5[9] -; CHECK-NEXT: vmov.8 q1[9], r0 -; CHECK-NEXT: vmov.u8 
r0, q5[10] -; CHECK-NEXT: vmov.8 q1[10], r0 -; CHECK-NEXT: vmov.u8 r0, q3[11] +; CHECK-NEXT: vmov.u8 r0, q2[3] +; CHECK-NEXT: vmov.8 q1[9], r7 +; CHECK-NEXT: vmov.u8 r3, q3[12] +; CHECK-NEXT: vmov.8 q1[10], r9 +; CHECK-NEXT: vmov.u8 r4, q2[4] ; CHECK-NEXT: vmov.8 q1[11], r0 -; CHECK-NEXT: vmov.u8 r0, q5[12] -; CHECK-NEXT: vmov.8 q1[12], r0 -; CHECK-NEXT: vmov.u8 r0, q5[13] +; CHECK-NEXT: vmov.u8 r0, q3[13] +; CHECK-NEXT: vmov.8 q1[12], r3 +; CHECK-NEXT: vmov.u8 r2, q3[15] ; CHECK-NEXT: vmov.8 q1[13], r0 -; CHECK-NEXT: vmov.u8 r0, q3[14] -; CHECK-NEXT: vmov.8 q1[14], r0 -; CHECK-NEXT: vmov.u8 r0, q5[15] -; CHECK-NEXT: vmov.8 q1[15], r0 -; CHECK-NEXT: vstrw.32 q1, [r1, #32] -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: bx lr +; CHECK-NEXT: vmov.8 q1[14], r4 +; CHECK-NEXT: vmov.u8 r4, q2[5] +; CHECK-NEXT: vmov.8 q0[1], r4 +; CHECK-NEXT: vmov.u8 r4, q3[2] +; CHECK-NEXT: vmov.8 q0[2], r4 +; CHECK-NEXT: vmov.8 q1[15], r2 +; CHECK-NEXT: vmov.8 q0[3], r5 +; CHECK-NEXT: vmov.u8 r5, q2[6] +; CHECK-NEXT: vmov.8 q0[4], r5 +; CHECK-NEXT: vmov.u8 r5, q3[5] +; CHECK-NEXT: vmov.8 q0[5], r5 +; CHECK-NEXT: vstrw.32 q1, [r1] +; CHECK-NEXT: vmov.8 q0[6], r6 +; CHECK-NEXT: vmov.u8 r6, q2[7] +; CHECK-NEXT: vmov.8 q0[7], r6 +; CHECK-NEXT: vmov.u8 r6, q3[8] +; CHECK-NEXT: vmov.8 q0[8], r6 +; CHECK-NEXT: vmov.8 q0[9], r7 +; CHECK-NEXT: vmov.u8 r7, q2[8] +; CHECK-NEXT: vmov.8 q0[10], r7 +; CHECK-NEXT: vmov.u8 r7, q3[11] +; CHECK-NEXT: vmov.8 q0[11], r7 +; CHECK-NEXT: vmov.8 q0[12], r3 +; CHECK-NEXT: vmov.u8 r3, q2[9] +; CHECK-NEXT: vmov.8 q0[13], r3 +; CHECK-NEXT: vmov.u8 r3, q3[14] +; CHECK-NEXT: vmov.8 q0[14], r3 +; CHECK-NEXT: vmov.8 q0[15], r2 +; CHECK-NEXT: vmov.u8 r2, q2[10] +; CHECK-NEXT: vmov.8 q3[0], r2 +; CHECK-NEXT: vmov.u8 r2, q2[11] +; CHECK-NEXT: vmov.8 q3[1], r12 +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vmov.8 q3[2], r4 +; CHECK-NEXT: vmov.8 q3[3], r2 +; CHECK-NEXT: vmov.u8 r2, q2[12] +; CHECK-NEXT: vmov.8 q3[4], lr +; CHECK-NEXT: vmov.8 q3[5], r5 +; CHECK-NEXT: vmov.8 q3[6], r2 +; CHECK-NEXT: vmov.u8 r2, q2[13] +; CHECK-NEXT: vmov.8 q3[7], r8 +; CHECK-NEXT: vmov.8 q3[8], r6 +; CHECK-NEXT: vmov.8 q3[9], r2 +; CHECK-NEXT: vmov.u8 r2, q2[14] +; CHECK-NEXT: vmov.8 q3[10], r9 +; CHECK-NEXT: vmov.8 q3[11], r7 +; CHECK-NEXT: vmov.8 q3[12], r2 +; CHECK-NEXT: vmov.8 q3[13], r0 +; CHECK-NEXT: vmov.u8 r0, q2[15] +; CHECK-NEXT: vmov.8 q3[14], r3 +; CHECK-NEXT: vmov.8 q3[15], r0 +; CHECK-NEXT: vstrw.32 q3, [r1, #32] +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} entry: %s1 = getelementptr <16 x i8>, <16 x i8>* %src, i32 0 %l1 = load <16 x i8>, <16 x i8>* %s1, align 4 @@ -941,40 +706,25 @@ define void @vst3_v4i64(<4 x i64> *%src, <12 x i64> *%dst) { ; CHECK-LABEL: vst3_v4i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: vldrw.u32 q3, [r0, #48] -; CHECK-NEXT: vldrw.u32 q7, [r0, #32] -; CHECK-NEXT: vldrw.u32 q0, [r0, #80] -; CHECK-NEXT: vldrw.u32 q1, [r0] -; CHECK-NEXT: vmov.f32 s16, s14 -; CHECK-NEXT: vldrw.u32 q6, [r0, #16] -; CHECK-NEXT: vmov.f32 s17, s15 -; CHECK-NEXT: vldrw.u32 q2, [r0, #64] -; CHECK-NEXT: vmov.f64 d7, d15 -; CHECK-NEXT: vmov.f32 s18, s2 -; CHECK-NEXT: vmov.f32 s19, s3 -; CHECK-NEXT: vmov.f32 s20, s4 -; CHECK-NEXT: vstrw.32 q4, [r1, #80] -; CHECK-NEXT: vmov.f32 s21, s5 -; CHECK-NEXT: vmov.f32 s22, s28 -; CHECK-NEXT: vmov.f32 s23, s29 -; CHECK-NEXT: vmov.f32 s4, s8 -; CHECK-NEXT: vstrw.32 q5, [r1] -; CHECK-NEXT: vmov.f32 s5, s9 -; CHECK-NEXT: 
vmov.f32 s28, s24 -; CHECK-NEXT: vstrw.32 q1, [r1, #16] -; CHECK-NEXT: vmov.f32 s29, s25 -; CHECK-NEXT: vmov.f32 s30, s12 -; CHECK-NEXT: vmov.f32 s31, s13 -; CHECK-NEXT: vmov.f32 s2, s26 -; CHECK-NEXT: vstrw.32 q7, [r1, #48] -; CHECK-NEXT: vmov.f32 s3, s27 -; CHECK-NEXT: vmov.f32 s8, s14 -; CHECK-NEXT: vstrw.32 q0, [r1, #64] -; CHECK-NEXT: vmov.f32 s9, s15 -; CHECK-NEXT: vstrw.32 q2, [r1, #32] -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vldrw.u32 q2, [r0, #48] +; CHECK-NEXT: vldrw.u32 q1, [r0, #80] +; CHECK-NEXT: vldrw.u32 q4, [r0, #64] +; CHECK-NEXT: vldrw.u32 q0, [r0, #32] +; CHECK-NEXT: vmov.f64 d7, d3 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: vmov.f64 d6, d4 +; CHECK-NEXT: vmov.f64 d3, d1 +; CHECK-NEXT: vstrw.32 q3, [r1, #80] +; CHECK-NEXT: vmov.f64 d10, d8 +; CHECK-NEXT: vstrw.32 q1, [r1, #64] +; CHECK-NEXT: vmov.f64 d11, d5 +; CHECK-NEXT: vmov.f64 d8, d0 +; CHECK-NEXT: vstrw.32 q5, [r1, #16] +; CHECK-NEXT: vstrw.32 q4, [r1, #32] +; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <4 x i64>, <4 x i64>* %src, i32 0 @@ -1021,26 +771,18 @@ define void @vst3_v4f32(<4 x float> *%src, <12 x float> *%dst) { ; CHECK-LABEL: vst3_v4f32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-NEXT: vldrw.u32 q3, [r0] ; CHECK-NEXT: vldrw.u32 q1, [r0, #32] -; CHECK-NEXT: vmov.f32 s8, s1 -; CHECK-NEXT: vmov.f32 s9, s5 -; CHECK-NEXT: vmov.f32 s18, s4 -; CHECK-NEXT: vmov.f32 s4, s6 -; CHECK-NEXT: vmov.f32 s11, s2 -; CHECK-NEXT: vmov.f32 s10, s14 -; CHECK-NEXT: vmov.f32 s16, s12 -; CHECK-NEXT: vstrw.32 q2, [r1, #16] -; CHECK-NEXT: vmov.f32 s17, s0 -; CHECK-NEXT: vmov.f32 s19, s13 -; CHECK-NEXT: vmov.f32 s5, s15 -; CHECK-NEXT: vstrw.32 q4, [r1] -; CHECK-NEXT: vmov.f32 s6, s3 -; CHECK-NEXT: vstrw.32 q1, [r1, #32] -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmov q3, q0 +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s5 +; CHECK-NEXT: vmov.f32 s2, s4 +; CHECK-NEXT: vstrw.32 q3, [r1, #16] +; CHECK-NEXT: vmov.f32 s8, s6 +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: vmov.f32 s9, s1 +; CHECK-NEXT: vmov.f32 s11, s7 +; CHECK-NEXT: vstrw.32 q2, [r1, #32] ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0 @@ -1061,45 +803,30 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: .pad #16 -; CHECK-NEXT: sub sp, #16 -; CHECK-NEXT: vldrw.u32 q6, [r0] -; CHECK-NEXT: vldrw.u32 q0, [r0, #80] -; CHECK-NEXT: vldrw.u32 q4, [r0, #16] -; CHECK-NEXT: vldrw.u32 q2, [r0, #48] -; CHECK-NEXT: vstrw.32 q6, [sp] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q7, [r0, #32] -; CHECK-NEXT: vldrw.u32 q1, [r0, #64] -; CHECK-NEXT: vmov.f32 s12, s2 -; CHECK-NEXT: vmov.f32 s20, s24 -; CHECK-NEXT: vmov.f32 s13, s19 -; CHECK-NEXT: vmov.f32 s24, s16 -; CHECK-NEXT: vmov.f32 s27, s17 -; CHECK-NEXT: vmov.f32 s2, s18 -; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s15, s3 -; CHECK-NEXT: vmov.f32 s14, s11 -; CHECK-NEXT: vmov.f32 s23, s25 -; CHECK-NEXT: vstrw.32 q3, [r1, #80] -; CHECK-NEXT: vmov.f32 s22, s4 -; CHECK-NEXT: vmov.f32 s21, s28 -; CHECK-NEXT: vmov.f32 s25, s8 -; CHECK-NEXT: vstrw.32 q5, [r1] -; CHECK-NEXT: vmov.f32 s26, s0 -; CHECK-NEXT: vmov.f32 s0, s9 -; CHECK-NEXT: vstrw.32 q6, [r1, #48] -; 
CHECK-NEXT: vmov.f32 s3, s10 -; CHECK-NEXT: vmov.f32 s9, s5 +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vldrw.u32 q0, [r0, #32] +; CHECK-NEXT: vldrw.u32 q3, [r0, #80] +; CHECK-NEXT: vldrw.u32 q2, [r0, #64] +; CHECK-NEXT: vmov q6, q0 +; CHECK-NEXT: vmov q7, q1 +; CHECK-NEXT: vmov.f32 s17, s5 +; CHECK-NEXT: vmov.f32 s21, s1 +; CHECK-NEXT: vmov.f32 s26, s8 +; CHECK-NEXT: vmov.f32 s30, s12 +; CHECK-NEXT: vstrw.32 q6, [r1] +; CHECK-NEXT: vmov.f32 s1, s13 +; CHECK-NEXT: vstrw.32 q7, [r1, #48] +; CHECK-NEXT: vmov.f32 s5, s9 ; CHECK-NEXT: vstrw.32 q0, [r1, #64] -; CHECK-NEXT: vmov.f32 s4, s6 -; CHECK-NEXT: vmov.f32 s8, s29 -; CHECK-NEXT: vmov.f32 s11, s30 -; CHECK-NEXT: vmov.f32 s10, s18 -; CHECK-NEXT: vmov.f32 s5, s19 -; CHECK-NEXT: vstrw.32 q2, [r1, #16] -; CHECK-NEXT: vmov.f32 s6, s31 -; CHECK-NEXT: vstrw.32 q1, [r1, #32] -; CHECK-NEXT: add sp, #16 +; CHECK-NEXT: vmov.f32 s16, s14 +; CHECK-NEXT: vstrw.32 q1, [r1, #16] +; CHECK-NEXT: vmov.f32 s18, s6 +; CHECK-NEXT: vmov.f32 s19, s15 +; CHECK-NEXT: vmov.f32 s20, s10 +; CHECK-NEXT: vstrw.32 q4, [r1, #80] +; CHECK-NEXT: vmov.f32 s22, s2 +; CHECK-NEXT: vmov.f32 s23, s11 +; CHECK-NEXT: vstrw.32 q5, [r1, #32] ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bx lr entry: @@ -1121,106 +848,77 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: .pad #144 -; CHECK-NEXT: sub sp, #144 -; CHECK-NEXT: vldrw.u32 q5, [r0, #112] -; CHECK-NEXT: vldrw.u32 q7, [r0, #32] -; CHECK-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-NEXT: vldrw.u32 q2, [r0, #128] -; CHECK-NEXT: vstrw.32 q5, [sp, #48] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q5, [r0, #96] -; CHECK-NEXT: vldrw.u32 q6, [r0] -; CHECK-NEXT: vstrw.32 q7, [sp, #16] @ 16-byte Spill -; CHECK-NEXT: vstrw.32 q5, [sp] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q5, [r0, #80] -; CHECK-NEXT: vldrw.u32 q7, [r0, #16] -; CHECK-NEXT: vldrw.u32 q4, [r0, #160] -; CHECK-NEXT: vstrw.32 q5, [sp, #80] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s12, s1 -; CHECK-NEXT: vstrw.32 q7, [sp, #64] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s13, s9 -; CHECK-NEXT: vmov.f32 s15, s2 -; CHECK-NEXT: vldrw.u32 q1, [r0, #176] -; CHECK-NEXT: vmov.f32 s14, s26 -; CHECK-NEXT: vldrw.u32 q5, [r0, #48] -; CHECK-NEXT: vldrw.u32 q7, [sp, #48] @ 16-byte Reload -; CHECK-NEXT: vstrw.32 q4, [sp, #128] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q4, [r0, #144] -; CHECK-NEXT: vstrw.32 q3, [r1, #16] -; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: .pad #96 +; CHECK-NEXT: sub sp, #96 +; CHECK-NEXT: vldrw.u32 q0, [r0, #160] +; CHECK-NEXT: vldrw.u32 q4, [r0, #80] +; CHECK-NEXT: vldrw.u32 q1, [r0, #96] +; CHECK-NEXT: vldrw.u32 q3, [r0, #112] +; CHECK-NEXT: vstrw.32 q0, [sp, #64] @ 16-byte Spill +; CHECK-NEXT: vldrw.u32 q0, [r0, #144] +; CHECK-NEXT: vmov q2, q4 +; CHECK-NEXT: vldrw.u32 q6, [r0, #176] +; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill +; CHECK-NEXT: vldrw.u32 q0, [r0, #128] +; CHECK-NEXT: vldrw.u32 q5, [r0, #64] +; CHECK-NEXT: vstrw.32 q1, [sp, #80] @ 16-byte Spill +; CHECK-NEXT: vmov.f32 s9, s1 +; CHECK-NEXT: vmov q7, q3 +; CHECK-NEXT: vstrw.32 q2, [r1, #16] +; CHECK-NEXT: vmov.f32 s8, s2 +; CHECK-NEXT: vmov.f32 s9, s5 +; CHECK-NEXT: vmov.f32 s10, s6 +; CHECK-NEXT: vmov q1, q3 +; CHECK-NEXT: vmov.f32 s11, s3 +; CHECK-NEXT: vstrw.32 q2, [r1, #32] +; CHECK-NEXT: vmov q2, q6 +; CHECK-NEXT: vmov.f32 s25, s5 +; CHECK-NEXT: vmov.f32 s24, s10 +; CHECK-NEXT: vmov.f32 s27, s11 +; CHECK-NEXT: vmov.f64 d2, d4 +; CHECK-NEXT: vldrw.u32 
q2, [sp, #64] @ 16-byte Reload ; CHECK-NEXT: vmov.f32 s12, s10 -; CHECK-NEXT: vmov.f32 s13, s27 ; CHECK-NEXT: vmov.f32 s15, s11 -; CHECK-NEXT: vstrw.32 q3, [r1, #32] -; CHECK-NEXT: vmov.f32 s13, s23 -; CHECK-NEXT: vmov.f32 s12, s6 -; CHECK-NEXT: vmov.f32 s15, s7 -; CHECK-NEXT: vmov.f32 s14, s31 -; CHECK-NEXT: vstrw.32 q3, [sp, #112] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s13, s0 -; CHECK-NEXT: vmov.f32 s14, s8 -; CHECK-NEXT: vmov.f64 d4, d14 -; CHECK-NEXT: vmov.f32 s0, s20 -; CHECK-NEXT: vmov.f32 s3, s21 -; CHECK-NEXT: vmov.f64 d10, d2 -; CHECK-NEXT: vldrw.u32 q1, [sp, #16] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s2, s20 -; CHECK-NEXT: vmov.f32 s1, s8 -; CHECK-NEXT: vmov.f64 d14, d2 -; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q0, [sp, #128] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s20, s9 -; CHECK-NEXT: vldrw.u32 q2, [sp] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s23, s30 -; CHECK-NEXT: vmov.f32 s12, s24 -; CHECK-NEXT: vstrw.32 q5, [sp, #32] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s15, s25 -; CHECK-NEXT: vstrw.32 q3, [sp, #96] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s21, s1 -; CHECK-NEXT: vmov.f32 s12, s2 -; CHECK-NEXT: vmov.f32 s15, s3 +; CHECK-NEXT: vmov q2, q5 +; CHECK-NEXT: vmov.f32 s10, s0 ; CHECK-NEXT: vmov q0, q4 -; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill -; CHECK-NEXT: vmov.f64 d0, d14 -; CHECK-NEXT: vldrw.u32 q4, [sp, #64] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s31, s1 -; CHECK-NEXT: vmov.f64 d0, d8 -; CHECK-NEXT: vmov.f32 s20, s9 -; CHECK-NEXT: vmov.f32 s23, s10 -; CHECK-NEXT: vmov.f32 s14, s11 -; CHECK-NEXT: vmov.f32 s29, s8 -; CHECK-NEXT: vldrw.u32 q2, [sp, #128] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s24, s2 -; CHECK-NEXT: vmov.f32 s30, s8 -; CHECK-NEXT: vmov.f32 s27, s3 -; CHECK-NEXT: vstrw.32 q7, [r1, #96] -; CHECK-NEXT: vmov.f32 s8, s0 -; CHECK-NEXT: vmov.f32 s11, s1 -; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s22, s6 -; CHECK-NEXT: vmov.f64 d8, d0 -; CHECK-NEXT: vldrw.u32 q0, [sp, #48] @ 16-byte Reload -; CHECK-NEXT: vstrw.32 q5, [r1, #112] -; CHECK-NEXT: vstrw.32 q0, [r1, #144] -; CHECK-NEXT: vldrw.u32 q0, [sp, #32] @ 16-byte Reload -; CHECK-NEXT: vstrw.32 q0, [r1, #160] -; CHECK-NEXT: vldrw.u32 q0, [sp, #112] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vstrw.32 q2, [sp, #32] @ 16-byte Spill +; CHECK-NEXT: vmov.f64 d4, d2 +; CHECK-NEXT: vmov.f32 s26, s6 ; CHECK-NEXT: vldrw.u32 q1, [sp, #80] @ 16-byte Reload -; CHECK-NEXT: vstrw.32 q0, [r1, #176] -; CHECK-NEXT: vldrw.u32 q0, [sp, #96] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s25, s19 +; CHECK-NEXT: vmov.f32 s2, s8 +; CHECK-NEXT: vstrw.32 q6, [r1, #176] +; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill +; CHECK-NEXT: vmov q0, q1 +; CHECK-NEXT: vmov.f32 s1, s9 +; CHECK-NEXT: vmov q2, q7 +; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill +; CHECK-NEXT: vmov q0, q7 +; CHECK-NEXT: vldrw.u32 q7, [sp, #64] @ 16-byte Reload +; CHECK-NEXT: vmov.f32 s13, s21 +; CHECK-NEXT: vmov.f32 s14, s22 +; CHECK-NEXT: vmov.f32 s6, s28 ; CHECK-NEXT: vstrw.32 q3, [r1, #128] -; CHECK-NEXT: vmov.f32 s26, s7 -; CHECK-NEXT: vstrw.32 q0, [r1] -; CHECK-NEXT: vmov.f32 s10, s16 -; CHECK-NEXT: vstrw.32 q6, [r1, #80] -; CHECK-NEXT: vmov.f32 s9, s4 -; CHECK-NEXT: vmov.f32 s16, s5 +; CHECK-NEXT: vstrw.32 q1, [sp, #80] @ 16-byte Spill +; CHECK-NEXT: vldrw.u32 q1, [sp, #48] @ 16-byte Reload +; CHECK-NEXT: vmov.f32 s1, s29 +; CHECK-NEXT: vmov.f32 s10, s4 +; CHECK-NEXT: vstrw.32 q0, [r1, #112] +; CHECK-NEXT: vmov.f32 s21, s5 +; CHECK-NEXT: vldrw.u32 q0, 
[sp] @ 16-byte Reload +; CHECK-NEXT: vmov.f32 s16, s6 ; CHECK-NEXT: vstrw.32 q2, [r1, #48] -; CHECK-NEXT: vmov.f32 s19, s6 -; CHECK-NEXT: vstrw.32 q4, [r1, #64] -; CHECK-NEXT: add sp, #144 +; CHECK-NEXT: vmov.f32 s19, s7 +; CHECK-NEXT: vldrw.u32 q1, [sp, #80] @ 16-byte Reload +; CHECK-NEXT: vstrw.32 q0, [r1, #160] +; CHECK-NEXT: vldrw.u32 q0, [sp, #32] @ 16-byte Reload +; CHECK-NEXT: vstrw.32 q1, [r1, #96] +; CHECK-NEXT: vldrw.u32 q1, [sp, #16] @ 16-byte Reload +; CHECK-NEXT: vstrw.32 q5, [r1, #64] +; CHECK-NEXT: vstrw.32 q4, [r1, #80] +; CHECK-NEXT: vstrw.32 q1, [r1, #144] +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: add sp, #96 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bx lr entry: @@ -1323,53 +1021,36 @@ define void @vst3_v8f16(<8 x half> *%src, <24 x half> *%dst) { ; CHECK-LABEL: vst3_v8f16: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vldrw.u32 q3, [r0, #16] -; CHECK-NEXT: vldrw.u32 q1, [r0] -; CHECK-NEXT: vldrw.u32 q4, [r0, #32] -; CHECK-NEXT: vmov.f32 s0, s4 -; CHECK-NEXT: vmovx.f16 s2, s12 -; CHECK-NEXT: vins.f16 s0, s12 -; CHECK-NEXT: vmov r2, s2 -; CHECK-NEXT: vmov.16 q0[4], r2 -; CHECK-NEXT: vmovx.f16 s4, s4 -; CHECK-NEXT: vmov.f32 s1, s16 -; CHECK-NEXT: vmovx.f16 s11, s15 -; CHECK-NEXT: vmov.f32 s8, s5 -; CHECK-NEXT: vins.f16 s1, s4 -; CHECK-NEXT: vmovx.f16 s4, s16 -; CHECK-NEXT: vins.f16 s8, s13 -; CHECK-NEXT: vins.f16 s2, s4 -; CHECK-NEXT: vmovx.f16 s4, s19 -; CHECK-NEXT: vmov.f32 s3, s8 -; CHECK-NEXT: vins.f16 s11, s4 -; CHECK-NEXT: vmovx.f16 s4, s18 -; CHECK-NEXT: vmovx.f16 s8, s14 -; CHECK-NEXT: vins.f16 s9, s15 -; CHECK-NEXT: vins.f16 s8, s4 -; CHECK-NEXT: vmovx.f16 s4, s9 -; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vldrw.u32 q0, [r0, #16] +; CHECK-NEXT: vldrw.u32 q2, [r0, #32] +; CHECK-NEXT: vmovx.f16 s4, s0 +; CHECK-NEXT: vmovx.f16 s6, s9 +; CHECK-NEXT: vmov.f32 s5, s1 ; CHECK-NEXT: vins.f16 s9, s4 -; CHECK-NEXT: vrev32.16 q3, q3 -; CHECK-NEXT: vmov.f32 s10, s19 -; CHECK-NEXT: vmovx.f16 s4, s7 +; CHECK-NEXT: vmovx.f16 s4, s3 +; CHECK-NEXT: vins.f16 s5, s6 +; CHECK-NEXT: vmovx.f16 s6, s10 ; CHECK-NEXT: vins.f16 s10, s4 -; CHECK-NEXT: vmovx.f16 s4, s5 -; CHECK-NEXT: vmov.f32 s12, s17 -; CHECK-NEXT: vstrw.32 q2, [r1, #32] -; CHECK-NEXT: vins.f16 s12, s4 -; CHECK-NEXT: vmovx.f16 s4, s6 -; CHECK-NEXT: vins.f16 s18, s4 -; CHECK-NEXT: vmovx.f16 s4, s17 -; CHECK-NEXT: vins.f16 s13, s4 -; CHECK-NEXT: vmovx.f16 s4, s14 -; CHECK-NEXT: vins.f16 s6, s4 -; CHECK-NEXT: vmov.f32 s15, s18 -; CHECK-NEXT: vmov.f32 s14, s6 +; CHECK-NEXT: vmov.f32 s12, s0 +; CHECK-NEXT: vmovx.f16 s4, s2 +; CHECK-NEXT: vins.f16 s12, s6 +; CHECK-NEXT: vmovx.f16 s6, s11 +; CHECK-NEXT: vins.f16 s11, s4 +; CHECK-NEXT: vmovx.f16 s4, s1 +; CHECK-NEXT: vmov.f32 s15, s3 +; CHECK-NEXT: vmovx.f16 s14, s8 +; CHECK-NEXT: vins.f16 s8, s4 +; CHECK-NEXT: vins.f16 s15, s6 +; CHECK-NEXT: vmov.f32 s6, s2 +; CHECK-NEXT: vins.f16 s2, s14 +; CHECK-NEXT: vmov.f32 s13, s1 +; CHECK-NEXT: vmov.f32 s14, s11 +; CHECK-NEXT: vmov.f32 s1, s8 +; CHECK-NEXT: vstrw.32 q3, [r1, #32] +; CHECK-NEXT: vmov.f32 s4, s9 ; CHECK-NEXT: vstrw.32 q0, [r1] -; CHECK-NEXT: vstrw.32 q3, [r1, #16] -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vstrw.32 q1, [r1, #16] ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <8 x half>, <8 x half>* %src, i32 0 @@ -1390,121 +1071,66 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: .pad 
#96 -; CHECK-NEXT: sub sp, #96 -; CHECK-NEXT: vldrw.u32 q2, [r0, #32] +; CHECK-NEXT: vldrw.u32 q0, [r0, #32] ; CHECK-NEXT: vldrw.u32 q3, [r0, #64] -; CHECK-NEXT: vldrw.u32 q4, [r0, #16] -; CHECK-NEXT: vldrw.u32 q5, [r0, #48] -; CHECK-NEXT: vmovx.f16 s0, s15 -; CHECK-NEXT: vmovx.f16 s7, s11 -; CHECK-NEXT: vins.f16 s7, s0 -; CHECK-NEXT: vmov q6, q2 -; CHECK-NEXT: vmovx.f16 s0, s14 -; CHECK-NEXT: vmovx.f16 s4, s10 -; CHECK-NEXT: vins.f16 s1, s11 -; CHECK-NEXT: vldrw.u32 q2, [r0] -; CHECK-NEXT: vins.f16 s4, s0 -; CHECK-NEXT: vmovx.f16 s0, s1 -; CHECK-NEXT: vstrw.32 q1, [sp, #80] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s5, s11 -; CHECK-NEXT: vins.f16 s5, s0 -; CHECK-NEXT: vmov.f32 s6, s15 -; CHECK-NEXT: vmovx.f16 s0, s11 -; CHECK-NEXT: vmov q7, q4 -; CHECK-NEXT: vins.f16 s6, s0 -; CHECK-NEXT: vmovx.f16 s2, s20 -; CHECK-NEXT: vstrw.32 q1, [sp, #64] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s4, s16 -; CHECK-NEXT: vins.f16 s4, s20 -; CHECK-NEXT: vmov.f32 s0, s17 -; CHECK-NEXT: vmov r2, s2 -; CHECK-NEXT: vstrw.32 q7, [sp, #16] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s16, s4 -; CHECK-NEXT: vmovx.f16 s4, s28 -; CHECK-NEXT: vldrw.u32 q7, [r0, #80] -; CHECK-NEXT: vmov.16 q4[4], r2 -; CHECK-NEXT: vins.f16 s0, s21 -; CHECK-NEXT: vstrw.32 q5, [sp] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s19, s0 -; CHECK-NEXT: vmovx.f16 s0, s28 -; CHECK-NEXT: vins.f16 s18, s0 -; CHECK-NEXT: vmov.f64 d0, d4 -; CHECK-NEXT: vstrw.32 q6, [sp, #48] @ 16-byte Spill +; CHECK-NEXT: vldrw.u32 q6, [r0, #80] +; CHECK-NEXT: vmovx.f16 s8, s15 +; CHECK-NEXT: vmov.f32 s7, s3 +; CHECK-NEXT: vins.f16 s7, s8 +; CHECK-NEXT: vldrw.u32 q2, [r0, #48] +; CHECK-NEXT: vmovx.f16 s6, s14 +; CHECK-NEXT: vmov.f32 s4, s0 +; CHECK-NEXT: vins.f16 s4, s6 +; CHECK-NEXT: vmovx.f16 s6, s2 +; CHECK-NEXT: vins.f16 s15, s6 +; CHECK-NEXT: vmovx.f16 s6, s8 +; CHECK-NEXT: vmovx.f16 s5, s13 +; CHECK-NEXT: vins.f16 s13, s6 +; CHECK-NEXT: vmovx.f16 s6, s11 ; CHECK-NEXT: vmov.f32 s20, s8 -; CHECK-NEXT: vmovx.f16 s8, s24 -; CHECK-NEXT: vmov.f32 s22, s28 -; CHECK-NEXT: vins.f16 s20, s24 -; CHECK-NEXT: vmov r0, s8 -; CHECK-NEXT: vmov.f32 s17, s28 -; CHECK-NEXT: vmov.16 q5[4], r0 -; CHECK-NEXT: vmov.f32 s2, s10 -; CHECK-NEXT: vins.f16 s17, s4 -; CHECK-NEXT: vmov.f32 s4, s9 -; CHECK-NEXT: vldrw.u32 q2, [sp] @ 16-byte Reload -; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill -; CHECK-NEXT: vmovx.f16 s0, s0 -; CHECK-NEXT: vmov.f32 s21, s12 -; CHECK-NEXT: vmovx.f16 s24, s10 -; CHECK-NEXT: vins.f16 s21, s0 -; CHECK-NEXT: vmovx.f16 s0, s12 -; CHECK-NEXT: vins.f16 s22, s0 -; CHECK-NEXT: vmovx.f16 s0, s30 -; CHECK-NEXT: vins.f16 s24, s0 -; CHECK-NEXT: vmovx.f16 s0, s31 -; CHECK-NEXT: vmovx.f16 s27, s11 -; CHECK-NEXT: vins.f16 s4, s25 -; CHECK-NEXT: vins.f16 s27, s0 -; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload -; CHECK-NEXT: vins.f16 s25, s11 -; CHECK-NEXT: vmov.f32 s23, s4 -; CHECK-NEXT: vmovx.f16 s4, s25 -; CHECK-NEXT: vmov.f32 s25, s3 -; CHECK-NEXT: vmov.f32 s26, s31 -; CHECK-NEXT: vmovx.f16 s0, s3 -; CHECK-NEXT: vins.f16 s25, s4 -; CHECK-NEXT: vins.f16 s26, s0 -; CHECK-NEXT: vmovx.f16 s4, s1 -; CHECK-NEXT: vmov.f32 s0, s29 -; CHECK-NEXT: vins.f16 s0, s4 -; CHECK-NEXT: vmovx.f16 s4, s2 -; CHECK-NEXT: vins.f16 s30, s4 -; CHECK-NEXT: vmov.f32 s6, s18 -; CHECK-NEXT: vrev32.16 q2, q2 -; CHECK-NEXT: vmovx.f16 s4, s29 -; CHECK-NEXT: vmov.f32 s3, s30 -; CHECK-NEXT: vldrw.u32 q7, [sp, #32] @ 16-byte Reload -; CHECK-NEXT: vins.f16 s9, s4 -; CHECK-NEXT: vmovx.f16 s4, s10 -; CHECK-NEXT: vins.f16 s2, s4 -; CHECK-NEXT: vmovx.f16 s4, s29 -; CHECK-NEXT: vmov.f32 s8, s13 -; 
CHECK-NEXT: vstrw.32 q6, [r1, #80] -; CHECK-NEXT: vins.f16 s8, s4 -; CHECK-NEXT: vmovx.f16 s4, s30 -; CHECK-NEXT: vins.f16 s14, s4 -; CHECK-NEXT: vmov.f32 s10, s30 -; CHECK-NEXT: vmov.f32 s11, s14 -; CHECK-NEXT: vmovx.f16 s4, s13 -; CHECK-NEXT: vldrw.u32 q3, [sp, #48] @ 16-byte Reload -; CHECK-NEXT: vldrw.u32 q7, [sp, #64] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s18, s6 -; CHECK-NEXT: vstrw.32 q5, [r1] -; CHECK-NEXT: vrev32.16 q3, q3 -; CHECK-NEXT: vmov.f32 s6, s30 -; CHECK-NEXT: vldrw.u32 q7, [sp, #80] @ 16-byte Reload -; CHECK-NEXT: vins.f16 s13, s4 -; CHECK-NEXT: vmovx.f16 s4, s14 -; CHECK-NEXT: vmov.f32 s1, s9 -; CHECK-NEXT: vins.f16 s10, s4 -; CHECK-NEXT: vmov.f32 s9, s13 -; CHECK-NEXT: vmov.f32 s4, s28 -; CHECK-NEXT: vstrw.32 q2, [r1, #16] -; CHECK-NEXT: vmov.f32 s7, s31 -; CHECK-NEXT: vstrw.32 q4, [r1, #48] +; CHECK-NEXT: vins.f16 s14, s6 +; CHECK-NEXT: vmovx.f16 s6, s26 +; CHECK-NEXT: vmov.f32 s17, s9 +; CHECK-NEXT: vins.f16 s20, s6 +; CHECK-NEXT: vmovx.f16 s6, s10 +; CHECK-NEXT: vins.f16 s17, s5 +; CHECK-NEXT: vmovx.f16 s5, s27 +; CHECK-NEXT: vins.f16 s27, s6 +; CHECK-NEXT: vmov.f32 s23, s11 +; CHECK-NEXT: vmovx.f16 s6, s0 +; CHECK-NEXT: vins.f16 s23, s5 +; CHECK-NEXT: vmovx.f16 s5, s25 +; CHECK-NEXT: vins.f16 s25, s6 +; CHECK-NEXT: vmovx.f16 s6, s3 +; CHECK-NEXT: vins.f16 s26, s6 +; CHECK-NEXT: vmovx.f16 s6, s1 +; CHECK-NEXT: vmov.f32 s29, s1 +; CHECK-NEXT: vmov.f32 s19, s14 +; CHECK-NEXT: vmovx.f16 s14, s12 +; CHECK-NEXT: vins.f16 s12, s6 +; CHECK-NEXT: vmovx.f16 s6, s9 +; CHECK-NEXT: vins.f16 s29, s5 +; CHECK-NEXT: vmovx.f16 s5, s24 +; CHECK-NEXT: vins.f16 s24, s6 +; CHECK-NEXT: vmov.f32 s21, s9 +; CHECK-NEXT: vmov.f32 s22, s27 +; CHECK-NEXT: vmov.f32 s30, s2 +; CHECK-NEXT: vins.f16 s2, s14 +; CHECK-NEXT: vmov.f32 s18, s10 +; CHECK-NEXT: vins.f16 s10, s5 +; CHECK-NEXT: vmov.f32 s16, s13 +; CHECK-NEXT: vstrw.32 q5, [r1, #80] +; CHECK-NEXT: vmov.f32 s5, s1 +; CHECK-NEXT: vstrw.32 q4, [r1, #16] +; CHECK-NEXT: vmov.f32 s6, s15 +; CHECK-NEXT: vmov.f32 s1, s12 ; CHECK-NEXT: vstrw.32 q1, [r1, #32] -; CHECK-NEXT: vstrw.32 q0, [r1, #64] -; CHECK-NEXT: add sp, #96 +; CHECK-NEXT: vmov.f32 s9, s24 +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: vmov.f32 s28, s25 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vmov.f32 s31, s26 +; CHECK-NEXT: vstrw.32 q7, [r1, #64] ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bx lr entry: @@ -1554,31 +1180,25 @@ define void @vst3_v4f64(<4 x double> *%src, <12 x double> *%dst) { ; CHECK-LABEL: vst3_v4f64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: vldrw.u32 q7, [r0, #48] -; CHECK-NEXT: vldrw.u32 q6, [r0, #32] -; CHECK-NEXT: vldrw.u32 q0, [r0, #80] -; CHECK-NEXT: vldrw.u32 q1, [r0] -; CHECK-NEXT: vmov.f64 d6, d15 -; CHECK-NEXT: vldrw.u32 q2, [r0, #16] +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vldrw.u32 q2, [r0, #48] +; CHECK-NEXT: vldrw.u32 q1, [r0, #80] ; CHECK-NEXT: vldrw.u32 q4, [r0, #64] -; CHECK-NEXT: vmov.f64 d15, d13 -; CHECK-NEXT: vmov.f64 d7, d1 -; CHECK-NEXT: vmov.f64 d10, d2 +; CHECK-NEXT: vldrw.u32 q0, [r0, #32] +; CHECK-NEXT: vmov.f64 d7, d3 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: vmov.f64 d6, d4 +; CHECK-NEXT: vmov.f64 d3, d1 ; CHECK-NEXT: vstrw.32 q3, [r1, #80] -; CHECK-NEXT: vmov.f64 d11, d12 -; CHECK-NEXT: vmov.f64 d2, d8 -; CHECK-NEXT: vstrw.32 q5, [r1] -; CHECK-NEXT: vmov.f64 d1, d5 -; 
CHECK-NEXT: vstrw.32 q1, [r1, #16] -; CHECK-NEXT: vmov.f64 d8, d15 -; CHECK-NEXT: vstrw.32 q0, [r1, #64] -; CHECK-NEXT: vmov.f64 d12, d4 +; CHECK-NEXT: vmov.f64 d10, d8 +; CHECK-NEXT: vstrw.32 q1, [r1, #64] +; CHECK-NEXT: vmov.f64 d11, d5 +; CHECK-NEXT: vmov.f64 d8, d0 +; CHECK-NEXT: vstrw.32 q5, [r1, #16] ; CHECK-NEXT: vstrw.32 q4, [r1, #32] -; CHECK-NEXT: vmov.f64 d13, d14 -; CHECK-NEXT: vstrw.32 q6, [r1, #48] -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <4 x double>, <4 x double>* %src, i32 0 diff --git a/llvm/test/CodeGen/Thumb2/mve-vst4.ll b/llvm/test/CodeGen/Thumb2/mve-vst4.ll --- a/llvm/test/CodeGen/Thumb2/mve-vst4.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vst4.ll @@ -757,53 +757,42 @@ ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: .pad #64 ; CHECK-NEXT: sub sp, #64 -; CHECK-NEXT: vldrw.u32 q0, [r0, #32] -; CHECK-NEXT: vldrw.u32 q7, [r0] -; CHECK-NEXT: vldrw.u32 q2, [r0, #96] -; CHECK-NEXT: vldrw.u32 q3, [r0, #64] -; CHECK-NEXT: vmov.f32 s6, s0 -; CHECK-NEXT: vldrw.u32 q5, [r0, #112] -; CHECK-NEXT: vmov.f32 s7, s1 -; CHECK-NEXT: vldrw.u32 q4, [r0, #48] -; CHECK-NEXT: vmov.f64 d13, d1 +; CHECK-NEXT: vldrw.u32 q7, [r0, #80] +; CHECK-NEXT: vldrw.u32 q5, [r0, #32] +; CHECK-NEXT: vldrw.u32 q6, [r0] +; CHECK-NEXT: vldrw.u32 q1, [r0, #96] +; CHECK-NEXT: vstrw.32 q7, [sp, #32] @ 16-byte Spill +; CHECK-NEXT: vmov.f64 d15, d10 +; CHECK-NEXT: vldrw.u32 q2, [r0, #64] ; CHECK-NEXT: vldrw.u32 q0, [r0, #16] +; CHECK-NEXT: vldrw.u32 q3, [r0, #48] +; CHECK-NEXT: vldrw.u32 q4, [r0, #112] ; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill -; CHECK-NEXT: vldrw.u32 q0, [r0, #80] -; CHECK-NEXT: vmov.f32 s4, s28 -; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s5, s29 -; CHECK-NEXT: vmov.f32 s24, s30 -; CHECK-NEXT: vstrw.32 q1, [sp] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s25, s31 +; CHECK-NEXT: vmov.f64 d14, d12 +; CHECK-NEXT: vstrw.32 q7, [sp, #48] @ 16-byte Spill +; CHECK-NEXT: vmov.f64 d14, d4 +; CHECK-NEXT: vmov.f64 d15, d2 +; CHECK-NEXT: vstrw.32 q7, [sp] @ 16-byte Spill +; CHECK-NEXT: vmov.f64 d4, d0 +; CHECK-NEXT: vldrw.u32 q0, [sp, #32] @ 16-byte Reload ; CHECK-NEXT: vldrw.u32 q7, [sp, #16] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s6, s8 -; CHECK-NEXT: vstrw.32 q6, [sp, #48] @ 16-byte Spill -; CHECK-NEXT: vmov.f32 s7, s9 -; CHECK-NEXT: vmov.f32 s4, s12 -; CHECK-NEXT: vmov.f32 s5, s13 -; CHECK-NEXT: vmov.f32 s8, s14 -; CHECK-NEXT: vstrw.32 q1, [r1, #16] -; CHECK-NEXT: vmov.f32 s9, s15 -; CHECK-NEXT: vldrw.u32 q3, [sp, #32] @ 16-byte Reload -; CHECK-NEXT: vmov.f64 d1, d15 -; CHECK-NEXT: vstrw.32 q2, [r1, #48] -; CHECK-NEXT: vmov.f64 d13, d7 -; CHECK-NEXT: vmov.f32 s14, s20 -; CHECK-NEXT: vmov.f32 s15, s21 -; CHECK-NEXT: vmov.f32 s30, s16 -; CHECK-NEXT: vstrw.32 q3, [r1, #80] -; CHECK-NEXT: vmov.f32 s31, s17 -; CHECK-NEXT: vldrw.u32 q3, [sp, #48] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s16, s2 -; CHECK-NEXT: vstrw.32 q7, [r1, #64] -; CHECK-NEXT: vmov.f32 s17, s3 -; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload -; CHECK-NEXT: vmov.f32 s20, s26 -; CHECK-NEXT: vstrw.32 q4, [r1, #96] -; CHECK-NEXT: vmov.f32 s21, s27 -; CHECK-NEXT: vstrw.32 q3, [r1, #32] -; CHECK-NEXT: vstrw.32 q5, [r1, #112] +; CHECK-NEXT: vmov.f64 d10, d13 +; CHECK-NEXT: vmov.f64 d2, d5 +; CHECK-NEXT: vstrw.32 q5, [r1, #32] +; CHECK-NEXT: vmov.f64 d5, d6 +; CHECK-NEXT: vstrw.32 q1, [r1, #48] +; CHECK-NEXT: vmov.f64 d13, d8 +; CHECK-NEXT: vstrw.32 q2, [r1, #64] +; CHECK-NEXT: 
vmov.f64 d12, d0 +; CHECK-NEXT: vmov.f64 d8, d1 +; CHECK-NEXT: vldrw.u32 q0, [sp, #48] @ 16-byte Reload +; CHECK-NEXT: vstrw.32 q6, [r1, #80] ; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload +; CHECK-NEXT: vmov.f64 d6, d15 +; CHECK-NEXT: vstrw.32 q4, [r1, #112] +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vstrw.32 q3, [r1, #96] ; CHECK-NEXT: add sp, #64 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bx lr diff --git a/llvm/test/CodeGen/X86/haddsub-4.ll b/llvm/test/CodeGen/X86/haddsub-4.ll --- a/llvm/test/CodeGen/X86/haddsub-4.ll +++ b/llvm/test/CodeGen/X86/haddsub-4.ll @@ -330,19 +330,19 @@ define <16 x float> @hadd_reverse_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind { ; SSE-LABEL: hadd_reverse_v16f32: ; SSE: # %bb.0: -; SSE-NEXT: movaps %xmm5, %xmm8 -; SSE-NEXT: movaps %xmm1, %xmm5 -; SSE-NEXT: haddps %xmm2, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0,3,2] -; SSE-NEXT: haddps %xmm6, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0,3,2] -; SSE-NEXT: haddps %xmm0, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0,3,2] -; SSE-NEXT: haddps %xmm4, %xmm8 -; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0,3,2] -; SSE-NEXT: movaps %xmm3, %xmm0 -; SSE-NEXT: movaps %xmm7, %xmm1 -; SSE-NEXT: movaps %xmm5, %xmm2 +; SSE-NEXT: movaps %xmm4, %xmm8 +; SSE-NEXT: movaps %xmm0, %xmm4 +; SSE-NEXT: haddps %xmm3, %xmm2 +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,2,1,0] +; SSE-NEXT: haddps %xmm7, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,2,1,0] +; SSE-NEXT: haddps %xmm1, %xmm4 +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,2,1,0] +; SSE-NEXT: haddps %xmm5, %xmm8 +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,2,1,0] +; SSE-NEXT: movaps %xmm2, %xmm0 +; SSE-NEXT: movaps %xmm6, %xmm1 +; SSE-NEXT: movaps %xmm4, %xmm2 ; SSE-NEXT: movaps %xmm8, %xmm3 ; SSE-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll --- a/llvm/test/CodeGen/X86/oddshuffles.ll +++ b/llvm/test/CodeGen/X86/oddshuffles.ll @@ -512,42 +512,30 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind { ; SSE2-LABEL: v12i32: ; SSE2: # %bb.0: -; SSE2-NEXT: movaps %xmm2, %xmm3 -; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[1,3] -; SSE2-NEXT: movaps %xmm0, %xmm4 -; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] -; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2] -; SSE2-NEXT: movaps %xmm0, %xmm3 -; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1] -; SSE2-NEXT: movaps %xmm2, %xmm5 -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[1,0] -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2] -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3] -; SSE2-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[0,2] -; SSE2-NEXT: movaps %xmm2, 32(%rdi) -; SSE2-NEXT: movaps %xmm5, 16(%rdi) -; SSE2-NEXT: movaps %xmm4, (%rdi) +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0] +; SSE2-NEXT: movaps %xmm1, %xmm3 +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[0,2] +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm2[2,3] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0,1,3] +; SSE2-NEXT: movaps %xmm1, 32(%rdi) +; SSE2-NEXT: movaps %xmm0, 16(%rdi) +; SSE2-NEXT: movaps %xmm3, (%rdi) ; SSE2-NEXT: retq ; ; SSE42-LABEL: v12i32: ; SSE42: # %bb.0: -; 
SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1] -; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1] -; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5,6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,1,0,1] -; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5],xmm4[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2] -; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5],xmm4[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] -; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3] -; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7] -; SSE42-NEXT: movdqa %xmm0, 32(%rdi) -; SSE42-NEXT: movdqa %xmm4, 16(%rdi) -; SSE42-NEXT: movdqa %xmm3, (%rdi) +; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1] +; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7] +; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3] +; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3,4,5],xmm3[6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7] +; SSE42-NEXT: movdqa %xmm1, 16(%rdi) +; SSE42-NEXT: movdqa %xmm3, 32(%rdi) +; SSE42-NEXT: movdqa %xmm0, (%rdi) ; SSE42-NEXT: retq ; ; AVX1-LABEL: v12i32: @@ -1211,66 +1199,43 @@ define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind { ; SSE2-LABEL: interleave_24i16_in: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqu (%rsi), %xmm3 -; SSE2-NEXT: movdqu (%rdx), %xmm2 +; SSE2-NEXT: movdqu (%rdx), %xmm0 ; SSE2-NEXT: movdqu (%rcx), %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: pand %xmm2, %xmm3 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0] -; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535] -; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: pandn %xmm4, %xmm5 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[3,3,3,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5] -; SSE2-NEXT: pand %xmm0, %xmm3 -; SSE2-NEXT: por %xmm5, %xmm3 -; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: pandn %xmm4, %xmm5 -; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[3,3,3,3,4,5,6,7] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7] -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: por %xmm5, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] -; SSE2-NEXT: pand %xmm5, %xmm1 -; SSE2-NEXT: pandn %xmm6, %xmm5 -; SSE2-NEXT: por %xmm1, %xmm5 -; SSE2-NEXT: pand %xmm0, %xmm5 -; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,4,4,4] -; SSE2-NEXT: pandn %xmm1, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: movdqu %xmm0, 16(%rdi) -; SSE2-NEXT: movdqu %xmm2, 32(%rdi) -; SSE2-NEXT: movdqu %xmm3, (%rdi) +; SSE2-NEXT: pandn %xmm4, %xmm2 +; SSE2-NEXT: por %xmm3, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = 
[0,65535,65535,0,65535,65535,0,65535] +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,2,2] +; SSE2-NEXT: pandn %xmm5, %xmm3 +; SSE2-NEXT: por %xmm4, %xmm3 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,0] +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] +; SSE2-NEXT: pandn %xmm1, %xmm4 +; SSE2-NEXT: por %xmm0, %xmm4 +; SSE2-NEXT: movdqu %xmm4, 32(%rdi) +; SSE2-NEXT: movdqu %xmm3, 16(%rdi) +; SSE2-NEXT: movdqu %xmm2, (%rdi) ; SSE2-NEXT: retq ; ; SSE42-LABEL: interleave_24i16_in: ; SSE42: # %bb.0: -; SSE42-NEXT: movdqu (%rsi), %xmm0 -; SSE42-NEXT: movdqu (%rdx), %xmm1 -; SSE42-NEXT: movdqu (%rcx), %xmm2 -; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,2] -; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3] -; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11] -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,0,0] -; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm0[0,1],xmm5[2],xmm0[3,4],xmm5[5],xmm0[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,2] -; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6],xmm3[7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7] -; SSE42-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] -; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7] -; SSE42-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4,5,6,7,u,u,8,9,10,11,u,u,12,13,14,15] -; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7] -; SSE42-NEXT: movdqu %xmm4, 32(%rdi) +; SSE42-NEXT: movdqu (%rdx), %xmm0 +; SSE42-NEXT: movdqu (%rcx), %xmm1 +; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0] +; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7] +; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,2] +; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7] +; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] +; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7] +; SSE42-NEXT: movdqu %xmm1, 32(%rdi) ; SSE42-NEXT: movdqu %xmm3, 16(%rdi) -; SSE42-NEXT: movdqu %xmm5, (%rdi) +; SSE42-NEXT: movdqu %xmm2, (%rdi) ; SSE42-NEXT: retq ; ; AVX1-LABEL: interleave_24i16_in: @@ -1631,232 +1596,105 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, <8 x i32>* %q3) nounwind { ; SSE2-LABEL: interleave_24i32_in: ; SSE2: # %bb.0: -; SSE2-NEXT: movups (%rsi), %xmm1 -; SSE2-NEXT: movups 16(%rsi), %xmm0 -; SSE2-NEXT: movups (%rdx), %xmm8 -; SSE2-NEXT: movups 16(%rdx), %xmm5 -; SSE2-NEXT: movups (%rcx), %xmm3 -; SSE2-NEXT: movups 16(%rcx), %xmm6 -; SSE2-NEXT: movaps %xmm3, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[1,3] -; SSE2-NEXT: movaps %xmm1, %xmm9 -; SSE2-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1] -; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm7[0,2] -; SSE2-NEXT: movaps %xmm5, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3] -; SSE2-NEXT: movaps %xmm6, %xmm4 -; SSE2-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3] -; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm7[0,2] -; SSE2-NEXT: movaps %xmm0, %xmm7 -; SSE2-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm5[1] -; SSE2-NEXT: movaps %xmm6, %xmm2 -; 
SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[1,0] -; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[0,2] -; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[1,3] -; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] -; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0,2] -; SSE2-NEXT: movaps %xmm8, %xmm5 -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm3[3,3] +; SSE2-NEXT: movups (%rdx), %xmm0 +; SSE2-NEXT: movups 16(%rdx), %xmm1 +; SSE2-NEXT: movups (%rcx), %xmm2 +; SSE2-NEXT: movups 16(%rcx), %xmm3 +; SSE2-NEXT: movaps %xmm2, %xmm4 +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm0[3,0] +; SSE2-NEXT: movaps %xmm0, %xmm5 +; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm4[0,2] +; SSE2-NEXT: movaps %xmm1, %xmm4 +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,2],xmm3[2,3] +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0,1,3] ; SSE2-NEXT: movaps %xmm3, %xmm6 -; SSE2-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3] -; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm5[0,2] -; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm8[1] -; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm8[1,0] -; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[0,2] -; SSE2-NEXT: movups %xmm3, 16(%rdi) -; SSE2-NEXT: movups %xmm6, 32(%rdi) -; SSE2-NEXT: movups %xmm0, 48(%rdi) -; SSE2-NEXT: movups %xmm2, 64(%rdi) +; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm0[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm0[2,3] +; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm1[3,0] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm2[2,3] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3] +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0,2] +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3] +; SSE2-NEXT: movups %xmm2, 16(%rdi) +; SSE2-NEXT: movups %xmm0, 32(%rdi) +; SSE2-NEXT: movups %xmm1, 48(%rdi) +; SSE2-NEXT: movups %xmm6, 64(%rdi) ; SSE2-NEXT: movups %xmm4, 80(%rdi) -; SSE2-NEXT: movups %xmm9, (%rdi) +; SSE2-NEXT: movups %xmm5, (%rdi) ; SSE2-NEXT: retq ; ; SSE42-LABEL: interleave_24i32_in: ; SSE42: # %bb.0: -; SSE42-NEXT: movdqu (%rsi), %xmm8 -; SSE42-NEXT: movdqu 16(%rsi), %xmm4 -; SSE42-NEXT: movdqu (%rdx), %xmm2 -; SSE42-NEXT: movdqu 16(%rdx), %xmm5 -; SSE42-NEXT: movdqu (%rcx), %xmm3 -; SSE42-NEXT: movdqu 16(%rcx), %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,1,1] -; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1] -; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,3],xmm7[4,5,6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,1] -; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5],xmm7[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,2,3] -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3] -; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3],xmm0[4,5,6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,2,3,3] -; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm0[0,1,2,3],xmm7[4,5],xmm0[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,2] -; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5],xmm0[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,1,1] -; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1] -; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5,6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,1,0,1] -; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4,5],xmm4[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,2,3] -; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3] -; 
SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3],xmm6[4,5,6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,2,3,3] -; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5],xmm6[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,2] -; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm8[4,5],xmm2[6,7] -; SSE42-NEXT: movdqu %xmm2, 16(%rdi) -; SSE42-NEXT: movdqu %xmm4, 32(%rdi) -; SSE42-NEXT: movdqu %xmm5, 48(%rdi) +; SSE42-NEXT: movdqu (%rdx), %xmm0 +; SSE42-NEXT: movdqu 16(%rdx), %xmm1 +; SSE42-NEXT: movdqu (%rcx), %xmm2 +; SSE42-NEXT: movdqu 16(%rcx), %xmm3 +; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,1,0,1] +; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm4[4,5],xmm0[6,7] +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,2,3] +; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm1[2,3,4,5],xmm5[6,7] +; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,1,0,1] +; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm1[0,1,2,3],xmm6[4,5],xmm1[6,7] +; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,2,3] +; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm0[2,3,4,5],xmm7[6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5,6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7] +; SSE42-NEXT: movdqu %xmm1, 16(%rdi) ; SSE42-NEXT: movdqu %xmm0, 64(%rdi) -; SSE42-NEXT: movdqu %xmm7, 80(%rdi) -; SSE42-NEXT: movdqu %xmm1, (%rdi) +; SSE42-NEXT: movdqu %xmm7, 32(%rdi) +; SSE42-NEXT: movdqu %xmm6, 48(%rdi) +; SSE42-NEXT: movdqu %xmm5, 80(%rdi) +; SSE42-NEXT: movdqu %xmm4, (%rdi) ; SSE42-NEXT: retq ; ; AVX1-LABEL: interleave_24i32_in: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovups (%rdx), %xmm0 -; AVX1-NEXT: vmovups (%rsi), %xmm1 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1] -; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vbroadcastsd (%rcx), %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX1-NEXT: vmovups 16(%rcx), %xmm1 -; AVX1-NEXT: vmovups 16(%rdx), %xmm2 -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm2[3,0],xmm1[3,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm1[2,1],xmm3[0,2] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[1,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2] -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7] -; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX1-NEXT: vmovups %ymm2, 32(%rdi) -; AVX1-NEXT: vmovups %ymm1, 64(%rdi) +; AVX1-NEXT: vmovupd (%rdx), %ymm0 +; AVX1-NEXT: vmovupd (%rcx), %ymm1 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[0,0,3,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7] +; AVX1-NEXT: vbroadcastsd (%rcx), %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = 
ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7] ; AVX1-NEXT: vmovups %ymm0, (%rdi) +; AVX1-NEXT: vmovups %ymm1, 32(%rdi) +; AVX1-NEXT: vmovups %ymm2, 64(%rdi) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; -; AVX2-SLOW-LABEL: interleave_24i32_in: -; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovups (%rsi), %ymm0 -; AVX2-SLOW-NEXT: vmovups (%rdx), %ymm1 -; AVX2-SLOW-NEXT: vmovups (%rcx), %ymm2 -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-SLOW-NEXT: vbroadcastsd (%rcx), %ymm4 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-SLOW-NEXT: vbroadcastsd 24(%rsi), %ymm5 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-SLOW-NEXT: vmovups %ymm0, 32(%rdi) -; AVX2-SLOW-NEXT: vmovups %ymm4, 64(%rdi) -; AVX2-SLOW-NEXT: vmovups %ymm3, (%rdi) -; AVX2-SLOW-NEXT: vzeroupper -; AVX2-SLOW-NEXT: retq -; -; AVX2-FAST-ALL-LABEL: interleave_24i32_in: -; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: vmovups (%rsi), %ymm0 -; AVX2-FAST-ALL-NEXT: vmovups (%rdx), %ymm1 -; AVX2-FAST-ALL-NEXT: vmovups (%rcx), %ymm2 -; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [1,0,2,2,1,0,2,2] -; AVX2-FAST-ALL-NEXT: # ymm3 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm3, %ymm3 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd (%rcx), %ymm4 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm4 = <5,u,u,6,u,u,7,u> -; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm4, %ymm4 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 24(%rsi), %ymm5 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7] -; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-ALL-NEXT: vmovups %ymm0, 32(%rdi) -; AVX2-FAST-ALL-NEXT: vmovups %ymm4, 64(%rdi) -; AVX2-FAST-ALL-NEXT: vmovups %ymm3, (%rdi) -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: interleave_24i32_in: -; AVX2-FAST-PERLANE: # %bb.0: -; 
AVX2-FAST-PERLANE-NEXT: vmovups (%rsi), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovups (%rdx), %ymm1 -; AVX2-FAST-PERLANE-NEXT: vmovups (%rcx), %ymm2 -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rcx), %ymm4 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rsi), %ymm5 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, 32(%rdi) -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, 64(%rdi) -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, (%rdi) -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-LABEL: interleave_24i32_in: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovups (%rdx), %ymm0 +; AVX2-NEXT: vmovups (%rcx), %ymm1 +; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm1[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7] +; AVX2-NEXT: vbroadcastsd (%rcx), %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7] +; AVX2-NEXT: vmovups %ymm0, (%rdi) +; AVX2-NEXT: vmovups %ymm1, 32(%rdi) +; AVX2-NEXT: vmovups %ymm2, 64(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq ; ; XOP-LABEL: interleave_24i32_in: ; XOP: # %bb.0: -; XOP-NEXT: vmovups (%rsi), %ymm0 -; XOP-NEXT: vmovups (%rcx), %ymm1 -; XOP-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm1[2],ymm0[3],ymm1[u,3],ymm0[4],ymm1[u,4],ymm0[5] -; XOP-NEXT: vmovups (%rdx), %xmm1 -; XOP-NEXT: vmovups (%rsi), %xmm2 -; XOP-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm2[1],xmm1[1] -; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm1[1,1],xmm3[0,2] -; XOP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; XOP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,1] -; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; XOP-NEXT: vbroadcastsd (%rcx), %ymm2 -; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7] -; XOP-NEXT: vmovups 16(%rcx), %xmm2 -; XOP-NEXT: vmovups 16(%rdx), %xmm3 -; XOP-NEXT: vshufps {{.*#+}} xmm4 = xmm3[3,0],xmm2[3,0] -; XOP-NEXT: vshufps {{.*#+}} xmm4 = xmm2[2,1],xmm4[0,2] -; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[1,0] -; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[2,2] -; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2 -; XOP-NEXT: vbroadcastsd 24(%rsi), %ymm3 -; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; XOP-NEXT: 
vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7] +; XOP-NEXT: vmovupd (%rdx), %ymm0 +; XOP-NEXT: vmovupd (%rcx), %ymm1 +; XOP-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,2,3] +; XOP-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[0,0,3,3] +; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7] +; XOP-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2] +; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7] +; XOP-NEXT: vbroadcastsd (%rcx), %ymm3 ; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7] -; XOP-NEXT: vmovups %ymm0, 32(%rdi) +; XOP-NEXT: vmovups %ymm0, (%rdi) +; XOP-NEXT: vmovups %ymm1, 32(%rdi) ; XOP-NEXT: vmovups %ymm2, 64(%rdi) -; XOP-NEXT: vmovups %ymm1, (%rdi) ; XOP-NEXT: vzeroupper ; XOP-NEXT: retq %s1 = load <8 x i32>, <8 x i32>* %q1, align 4 @@ -1873,80 +1711,103 @@ define void @splat3_128(<16 x i8> %a0, <16 x i8> %a1, <96 x i8> *%a2) { ; SSE2-LABEL: splat3_128: ; SSE2: # %bb.0: -; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm8, %xmm8 ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,4,5] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,1,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,6] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm8[8],xmm3[9],xmm8[9],xmm3[10],xmm8[10],xmm3[11],xmm8[11],xmm3[12],xmm8[12],xmm3[13],xmm8[13],xmm3[14],xmm8[14],xmm3[15],xmm8[15] +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [0,65535,65535,0,65535,65535,0,65535] +; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm3[1,1,2,2] +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[3,1,2,0] +; SSE2-NEXT: pand %xmm4, %xmm3 +; SSE2-NEXT: movdqa %xmm0, %xmm5 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,2,2] +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: pandn %xmm6, %xmm2 +; SSE2-NEXT: por %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,2,1,3] +; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3] +; SSE2-NEXT: pshuflw {{.*#+}} xmm10 = xmm3[0,1,0,3,4,5,6,7] +; SSE2-NEXT: packuswb %xmm2, %xmm10 +; SSE2-NEXT: pand %xmm4, %xmm5 +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: pandn %xmm9, %xmm2 +; SSE2-NEXT: por %xmm5, %xmm2 +; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm7[0,1,2,3,6,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,0] +; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7] ; SSE2-NEXT: packuswb %xmm5, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,1,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6] -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,7,7,7] -; SSE2-NEXT: packuswb %xmm5, %xmm3 -; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = 
xmm5[0,1,2,3,4,4,4,5] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7] -; SSE2-NEXT: packuswb %xmm0, %xmm5 -; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[2,3,3,3,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,5] -; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,1,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,1,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6] -; SSE2-NEXT: packuswb %xmm6, %xmm7 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,1,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7] -; SSE2-NEXT: packuswb %xmm4, %xmm0 -; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[2,3,3,3,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,5] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7] -; SSE2-NEXT: packuswb %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm1, %xmm5 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15] +; SSE2-NEXT: movdqa %xmm1, %xmm6 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7] +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,2,2] +; SSE2-NEXT: movdqa %xmm4, %xmm3 +; SSE2-NEXT: pandn %xmm7, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm5[1,1,2,2] +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[3,1,2,0] +; SSE2-NEXT: pand %xmm4, %xmm5 +; SSE2-NEXT: por %xmm5, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,1,3] +; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,2,1,4,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3] +; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,0,3,4,5,6,7] +; SSE2-NEXT: packuswb %xmm3, %xmm5 +; SSE2-NEXT: pand %xmm4, %xmm6 +; SSE2-NEXT: pandn %xmm8, %xmm4 +; SSE2-NEXT: por %xmm6, %xmm4 +; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm7[0,1,2,3,6,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,0] +; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7] +; SSE2-NEXT: packuswb %xmm3, %xmm4 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255] +; SSE2-NEXT: pshuflw {{.*#+}} xmm8 = xmm1[2,1,3,3,4,5,6,7] +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm0[2,1,3,3,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] +; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm7, %xmm6 +; SSE2-NEXT: por %xmm1, %xmm6 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,4,4,4,4] +; SSE2-NEXT: pandn %xmm1, %xmm3 +; SSE2-NEXT: por %xmm0, %xmm3 +; SSE2-NEXT: movdqa %xmm3, 64(%rdi) +; SSE2-NEXT: movdqa %xmm6, 16(%rdi) ; SSE2-NEXT: 
movdqa %xmm4, 80(%rdi) -; SSE2-NEXT: movdqa %xmm0, 64(%rdi) -; SSE2-NEXT: movdqa %xmm7, 48(%rdi) -; SSE2-NEXT: movdqa %xmm5, 32(%rdi) -; SSE2-NEXT: movdqa %xmm3, 16(%rdi) -; SSE2-NEXT: movdqa %xmm2, (%rdi) +; SSE2-NEXT: movdqa %xmm5, 48(%rdi) +; SSE2-NEXT: movdqa %xmm2, 32(%rdi) +; SSE2-NEXT: movdqa %xmm10, (%rdi) ; SSE2-NEXT: retq ; ; SSE42-LABEL: splat3_128: ; SSE42: # %bb.0: -; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5] +; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [128,5,128,128,6,128,128,7,128,128,8,128,128,9,128,128] ; SSE42-NEXT: movdqa %xmm0, %xmm3 ; SSE42-NEXT: pshufb %xmm2, %xmm3 -; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [5,5,6,6,6,7,7,7,8,8,8,9,9,9,10,10] -; SSE42-NEXT: movdqa %xmm0, %xmm5 +; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [0,128,2,3,128,5,6,128,8,9,128,11,12,128,14,15] +; SSE42-NEXT: movdqa %xmm1, %xmm5 ; SSE42-NEXT: pshufb %xmm4, %xmm5 -; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [10,11,11,11,12,12,12,13,13,13,14,14,14,15,15,15] +; SSE42-NEXT: por %xmm3, %xmm5 +; SSE42-NEXT: movdqa %xmm1, %xmm3 +; SSE42-NEXT: pshufb %xmm2, %xmm3 +; SSE42-NEXT: movdqa %xmm0, %xmm2 +; SSE42-NEXT: pshufb %xmm4, %xmm2 +; SSE42-NEXT: por %xmm3, %xmm2 +; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,0,3,4,1,6,7,2,9,10,3,12,13,4,15] +; SSE42-NEXT: movdqa %xmm0, %xmm4 +; SSE42-NEXT: pshufb %xmm3, %xmm4 +; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [10,1,2,11,4,5,12,7,8,13,10,11,14,13,14,15] ; SSE42-NEXT: pshufb %xmm6, %xmm0 ; SSE42-NEXT: movdqa %xmm1, %xmm7 -; SSE42-NEXT: pshufb %xmm2, %xmm7 -; SSE42-NEXT: movdqa %xmm1, %xmm2 -; SSE42-NEXT: pshufb %xmm4, %xmm2 +; SSE42-NEXT: pshufb %xmm3, %xmm7 ; SSE42-NEXT: pshufb %xmm6, %xmm1 ; SSE42-NEXT: movdqa %xmm1, 80(%rdi) -; SSE42-NEXT: movdqa %xmm2, 64(%rdi) ; SSE42-NEXT: movdqa %xmm7, 48(%rdi) ; SSE42-NEXT: movdqa %xmm0, 32(%rdi) +; SSE42-NEXT: movdqa %xmm4, (%rdi) +; SSE42-NEXT: movdqa %xmm2, 64(%rdi) ; SSE42-NEXT: movdqa %xmm5, 16(%rdi) -; SSE42-NEXT: movdqa %xmm3, (%rdi) ; SSE42-NEXT: retq ; ; AVX1-LABEL: splat3_128: @@ -2044,80 +1905,103 @@ define void @splat3_256(<32 x i8> %a0, <96 x i8> *%a1) { ; SSE2-LABEL: splat3_256: ; SSE2: # %bb.0: -; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm8, %xmm8 ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,4,5] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,1,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,6] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm8[8],xmm3[9],xmm8[9],xmm3[10],xmm8[10],xmm3[11],xmm8[11],xmm3[12],xmm8[12],xmm3[13],xmm8[13],xmm3[14],xmm8[14],xmm3[15],xmm8[15] +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [0,65535,65535,0,65535,65535,0,65535] +; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm3[1,1,2,2] +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[3,1,2,0] +; SSE2-NEXT: pand %xmm4, %xmm3 +; SSE2-NEXT: movdqa %xmm0, %xmm5 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,2,2] +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: pandn %xmm6, %xmm2 +; SSE2-NEXT: por %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,2,1,3] +; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = 
xmm3[0,2,1,3] +; SSE2-NEXT: pshuflw {{.*#+}} xmm10 = xmm3[0,1,0,3,4,5,6,7] +; SSE2-NEXT: packuswb %xmm2, %xmm10 +; SSE2-NEXT: pand %xmm4, %xmm5 +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: pandn %xmm9, %xmm2 +; SSE2-NEXT: por %xmm5, %xmm2 +; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm7[0,1,2,3,6,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,0] +; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7] ; SSE2-NEXT: packuswb %xmm5, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,1,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6] -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,7,7,7] -; SSE2-NEXT: packuswb %xmm5, %xmm3 -; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,5] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7] -; SSE2-NEXT: packuswb %xmm0, %xmm5 -; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[2,3,3,3,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,5] -; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,1,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,1,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6] -; SSE2-NEXT: packuswb %xmm6, %xmm7 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,1,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7] -; SSE2-NEXT: packuswb %xmm4, %xmm0 -; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[2,3,3,3,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,5] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7] -; SSE2-NEXT: packuswb %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm1, %xmm5 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15] +; SSE2-NEXT: movdqa %xmm1, %xmm6 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7] +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,2,2] +; SSE2-NEXT: movdqa %xmm4, %xmm3 +; SSE2-NEXT: pandn %xmm7, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm5[1,1,2,2] +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[3,1,2,0] +; SSE2-NEXT: pand %xmm4, %xmm5 +; SSE2-NEXT: por %xmm5, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,1,3] +; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = 
xmm5[0,1,2,1,4,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3] +; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,0,3,4,5,6,7] +; SSE2-NEXT: packuswb %xmm3, %xmm5 +; SSE2-NEXT: pand %xmm4, %xmm6 +; SSE2-NEXT: pandn %xmm8, %xmm4 +; SSE2-NEXT: por %xmm6, %xmm4 +; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm7[0,1,2,3,6,5,6,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,0] +; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7] +; SSE2-NEXT: packuswb %xmm3, %xmm4 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255] +; SSE2-NEXT: pshuflw {{.*#+}} xmm8 = xmm1[2,1,3,3,4,5,6,7] +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm0[2,1,3,3,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] +; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm7, %xmm6 +; SSE2-NEXT: por %xmm1, %xmm6 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,4,4,4,4] +; SSE2-NEXT: pandn %xmm1, %xmm3 +; SSE2-NEXT: por %xmm0, %xmm3 +; SSE2-NEXT: movdqa %xmm3, 64(%rdi) +; SSE2-NEXT: movdqa %xmm6, 16(%rdi) ; SSE2-NEXT: movdqa %xmm4, 80(%rdi) -; SSE2-NEXT: movdqa %xmm0, 64(%rdi) -; SSE2-NEXT: movdqa %xmm7, 48(%rdi) -; SSE2-NEXT: movdqa %xmm5, 32(%rdi) -; SSE2-NEXT: movdqa %xmm3, 16(%rdi) -; SSE2-NEXT: movdqa %xmm2, (%rdi) +; SSE2-NEXT: movdqa %xmm5, 48(%rdi) +; SSE2-NEXT: movdqa %xmm2, 32(%rdi) +; SSE2-NEXT: movdqa %xmm10, (%rdi) ; SSE2-NEXT: retq ; ; SSE42-LABEL: splat3_256: ; SSE42: # %bb.0: -; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5] +; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [128,5,128,128,6,128,128,7,128,128,8,128,128,9,128,128] ; SSE42-NEXT: movdqa %xmm0, %xmm3 ; SSE42-NEXT: pshufb %xmm2, %xmm3 -; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [5,5,6,6,6,7,7,7,8,8,8,9,9,9,10,10] -; SSE42-NEXT: movdqa %xmm0, %xmm5 +; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [0,128,2,3,128,5,6,128,8,9,128,11,12,128,14,15] +; SSE42-NEXT: movdqa %xmm1, %xmm5 ; SSE42-NEXT: pshufb %xmm4, %xmm5 -; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [10,11,11,11,12,12,12,13,13,13,14,14,14,15,15,15] +; SSE42-NEXT: por %xmm3, %xmm5 +; SSE42-NEXT: movdqa %xmm1, %xmm3 +; SSE42-NEXT: pshufb %xmm2, %xmm3 +; SSE42-NEXT: movdqa %xmm0, %xmm2 +; SSE42-NEXT: pshufb %xmm4, %xmm2 +; SSE42-NEXT: por %xmm3, %xmm2 +; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,0,3,4,1,6,7,2,9,10,3,12,13,4,15] +; SSE42-NEXT: movdqa %xmm0, %xmm4 +; SSE42-NEXT: pshufb %xmm3, %xmm4 +; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [10,1,2,11,4,5,12,7,8,13,10,11,14,13,14,15] ; SSE42-NEXT: pshufb %xmm6, %xmm0 ; SSE42-NEXT: movdqa %xmm1, %xmm7 -; SSE42-NEXT: pshufb %xmm2, %xmm7 -; SSE42-NEXT: movdqa %xmm1, %xmm2 -; SSE42-NEXT: pshufb %xmm4, %xmm2 +; SSE42-NEXT: pshufb %xmm3, %xmm7 ; SSE42-NEXT: pshufb %xmm6, %xmm1 ; SSE42-NEXT: movdqa %xmm1, 80(%rdi) -; SSE42-NEXT: movdqa %xmm2, 64(%rdi) ; SSE42-NEXT: movdqa %xmm7, 48(%rdi) ; SSE42-NEXT: movdqa %xmm0, 32(%rdi) +; SSE42-NEXT: movdqa %xmm4, (%rdi) +; SSE42-NEXT: movdqa %xmm2, 64(%rdi) ; SSE42-NEXT: movdqa %xmm5, 16(%rdi) -; SSE42-NEXT: movdqa %xmm3, (%rdi) ; SSE42-NEXT: retq ; ; AVX1-LABEL: splat3_256: diff --git a/llvm/test/CodeGen/X86/pr34592.ll b/llvm/test/CodeGen/X86/pr34592.ll --- a/llvm/test/CodeGen/X86/pr34592.ll +++ b/llvm/test/CodeGen/X86/pr34592.ll @@ -14,39 +14,30 @@ ; CHECK-NEXT: vmovaps %ymm4, %ymm10 ; CHECK-NEXT: vmovaps %ymm3, %ymm9 ; CHECK-NEXT: vmovaps %ymm1, %ymm8 -; CHECK-NEXT: vmovaps %ymm0, %ymm4 -; CHECK-NEXT: vmovaps 240(%rbp), %ymm1 -; CHECK-NEXT: vmovaps 208(%rbp), %ymm3 -; CHECK-NEXT: vmovaps 176(%rbp), %ymm0 -; 
CHECK-NEXT: vmovaps 144(%rbp), %ymm0 +; CHECK-NEXT: vmovaps 240(%rbp), %ymm4 +; CHECK-NEXT: vmovaps 208(%rbp), %ymm1 +; CHECK-NEXT: vmovaps 176(%rbp), %ymm3 +; CHECK-NEXT: vmovaps 144(%rbp), %ymm3 ; CHECK-NEXT: vmovaps 112(%rbp), %ymm11 ; CHECK-NEXT: vmovaps 80(%rbp), %ymm11 ; CHECK-NEXT: vmovaps 48(%rbp), %ymm11 ; CHECK-NEXT: vmovaps 16(%rbp), %ymm11 -; CHECK-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm2[6,7] -; CHECK-NEXT: vmovaps %xmm3, %xmm6 -; CHECK-NEXT: # implicit-def: $ymm2 -; CHECK-NEXT: vinserti128 $1, %xmm6, %ymm2, %ymm2 -; CHECK-NEXT: vpalignr {{.*#+}} ymm0 = ymm4[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] -; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,0] -; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7] -; CHECK-NEXT: vextracti128 $1, %ymm7, %xmm2 -; CHECK-NEXT: vmovq {{.*#+}} xmm6 = xmm2[0],zero -; CHECK-NEXT: # implicit-def: $ymm2 -; CHECK-NEXT: vmovaps %xmm6, %xmm2 -; CHECK-NEXT: # kill: def $xmm4 killed $xmm4 killed $ymm4 -; CHECK-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 -; CHECK-NEXT: vmovaps %xmm7, %xmm4 -; CHECK-NEXT: vpslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7] -; CHECK-NEXT: # implicit-def: $ymm4 -; CHECK-NEXT: vmovaps %xmm6, %xmm4 -; CHECK-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23] -; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3] -; CHECK-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7] -; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm1[2,3],ymm7[4,5,6,7] -; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,1,3] -; CHECK-NEXT: vpshufd {{.*#+}} ymm4 = ymm5[0,1,0,1,4,5,4,5] -; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm2[6,7] +; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm1[0],ymm3[2],ymm1[2] +; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,1,2,1] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5],ymm0[6,7] +; CHECK-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm5[4,5,6,7] +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1] +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7] +; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm1[0],ymm3[0],ymm1[2],ymm3[2] +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm7[2,3],ymm6[0,1] +; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7] +; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm7[0],ymm5[0],ymm7[2],ymm5[2] +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] +; CHECK-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,1,1,1] +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3,4,5],ymm1[6,7] ; CHECK-NEXT: movq %rbp, %rsp ; CHECK-NEXT: popq %rbp ; CHECK-NEXT: .cfi_def_cfa %rsp, 8 diff --git a/llvm/test/CodeGen/X86/pr44976.ll b/llvm/test/CodeGen/X86/pr44976.ll --- a/llvm/test/CodeGen/X86/pr44976.ll +++ b/llvm/test/CodeGen/X86/pr44976.ll @@ -12,57 +12,75 @@ ; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] ; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; CHECK-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero -; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] -; CHECK-NEXT: movd {{.*#+}} xmm0 
= mem[0],zero,zero,zero ; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; CHECK-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; CHECK-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero ; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero -; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; CHECK-NEXT: movd %r9d, %xmm0 +; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; CHECK-NEXT: movd %r9d, %xmm1 ; CHECK-NEXT: movd %r8d, %xmm3 -; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] +; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] ; CHECK-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; CHECK-NEXT: movd %ecx, %xmm0 +; CHECK-NEXT: movd %ecx, %xmm1 ; CHECK-NEXT: movd %edx, %xmm2 -; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] ; CHECK-NEXT: movd %esi, %xmm4 -; CHECK-NEXT: movd %edi, %xmm0 -; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; CHECK-NEXT: movd %edi, %xmm1 +; CHECK-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; CHECK-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; CHECK-NEXT: pinsrw $1, {{[0-9]+}}(%rsp), %xmm4 +; CHECK-NEXT: pinsrw $2, {{[0-9]+}}(%rsp), %xmm4 +; CHECK-NEXT: pinsrw $3, {{[0-9]+}}(%rsp), %xmm4 ; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero ; CHECK-NEXT: pinsrw $1, {{[0-9]+}}(%rsp), %xmm2 ; CHECK-NEXT: pinsrw $2, {{[0-9]+}}(%rsp), %xmm2 ; CHECK-NEXT: pinsrw $3, {{[0-9]+}}(%rsp), %xmm2 -; CHECK-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero -; CHECK-NEXT: pinsrw $1, {{[0-9]+}}(%rsp), %xmm3 -; CHECK-NEXT: pinsrw $2, {{[0-9]+}}(%rsp), %xmm3 -; CHECK-NEXT: pinsrw $3, {{[0-9]+}}(%rsp), %xmm3 -; CHECK-NEXT: movdqa %xmm0, %xmm4 -; CHECK-NEXT: pmulhuw %xmm1, %xmm4 -; CHECK-NEXT: pmullw %xmm1, %xmm0 -; CHECK-NEXT: movdqa %xmm0, %xmm1 -; CHECK-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] +; CHECK-NEXT: movdqa %xmm1, %xmm3 +; CHECK-NEXT: pmulhuw %xmm0, %xmm3 +; CHECK-NEXT: pmullw %xmm0, %xmm1 +; CHECK-NEXT: movdqa %xmm1, %xmm0 +; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,2,3,3] +; CHECK-NEXT: punpckhwd {{.*#+}} xmm1 = 
xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; CHECK-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7] +; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3] +; CHECK-NEXT: movdqa %xmm2, %xmm7 +; CHECK-NEXT: pmulhuw %xmm4, %xmm7 +; CHECK-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[0,0,2,1,4,5,6,7] +; CHECK-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm3[0] +; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,0,65535,0,65535,0] +; CHECK-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7] +; CHECK-NEXT: pmullw %xmm4, %xmm2 +; CHECK-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,3,4,5,6,7] +; CHECK-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0] +; CHECK-NEXT: pand %xmm3, %xmm5 +; CHECK-NEXT: pandn %xmm6, %xmm3 +; CHECK-NEXT: por %xmm5, %xmm3 +; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3] ; CHECK-NEXT: movdqa %xmm3, %xmm4 -; CHECK-NEXT: pmulhuw %xmm2, %xmm4 -; CHECK-NEXT: pmullw %xmm2, %xmm3 -; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3] -; CHECK-NEXT: movdqa %xmm0, %xmm2 -; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3] -; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,1,3] -; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; CHECK-NEXT: paddd %xmm2, %xmm0 -; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,1,3] -; CHECK-NEXT: paddd %xmm4, %xmm1 -; CHECK-NEXT: movdqa %xmm0, %xmm2 -; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3] -; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,3] -; CHECK-NEXT: paddd %xmm2, %xmm0 +; CHECK-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,3] +; CHECK-NEXT: movdqa %xmm0, %xmm5 +; CHECK-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm4[2,0] +; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm3[3,3,3,3] +; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,3,3,3] +; CHECK-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1] +; CHECK-NEXT: movdqa %xmm3, %xmm6 +; CHECK-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm1[0] +; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[2,0] +; CHECK-NEXT: paddd %xmm5, %xmm0 +; CHECK-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; CHECK-NEXT: paddd %xmm4, %xmm3 +; CHECK-NEXT: movdqa %xmm0, %xmm1 +; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3] +; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,3] +; CHECK-NEXT: paddd %xmm1, %xmm0 ; CHECK-NEXT: retq entry: %a32 = zext <12 x i16> %a to <12 x i32> diff --git a/llvm/test/CodeGen/X86/pr51615.ll b/llvm/test/CodeGen/X86/pr51615.ll --- a/llvm/test/CodeGen/X86/pr51615.ll +++ b/llvm/test/CodeGen/X86/pr51615.ll @@ -11,15 +11,14 @@ ; AVX-LABEL: volatile_load_2_elts: ; AVX: # %bb.0: ; AVX-NEXT: vmovaps g0(%rip), %xmm0 -; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] -; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1 -; AVX-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2] -; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3] -; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 -; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3] -; AVX-NEXT: vmovapd %ymm0, (%rax) -; AVX-NEXT: vmovapd %ymm1, (%rax) +; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0] +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1 +; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7] +; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1] +; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7] +; AVX-NEXT: vmovaps %ymm0, (%rax) +; AVX-NEXT: 
vmovaps %ymm1, (%rax) ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll @@ -74,12 +74,12 @@ ; SSE-NEXT: pandn %xmm2, %xmm3 ; SSE-NEXT: por %xmm4, %xmm3 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1] -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,65535,65535,65535,65535,65535] -; SSE-NEXT: pand %xmm2, %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,1,3,4,5,6,7] -; SSE-NEXT: pandn %xmm0, %xmm2 -; SSE-NEXT: por %xmm1, %xmm2 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm0, %xmm2 ; SSE-NEXT: movq %xmm2, 16(%rcx) ; SSE-NEXT: movdqa %xmm3, (%rcx) ; SSE-NEXT: retq @@ -149,42 +149,28 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.vecptr2, <24 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf8: ; SSE: # %bb.0: -; SSE-NEXT: movdqa (%rdi), %xmm3 -; SSE-NEXT: movdqa (%rsi), %xmm2 +; SSE-NEXT: movdqa (%rsi), %xmm0 ; SSE-NEXT: movdqa (%rdx), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm0, %xmm3 +; SSE-NEXT: pand %xmm2, %xmm3 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0] -; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535] -; SSE-NEXT: movdqa %xmm0, %xmm5 -; SSE-NEXT: pandn %xmm4, %xmm5 -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[3,3,3,3] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2] -; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5] -; SSE-NEXT: pand %xmm0, %xmm3 -; SSE-NEXT: por %xmm5, %xmm3 -; SSE-NEXT: movdqa %xmm0, %xmm5 -; SSE-NEXT: pandn %xmm4, %xmm5 -; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[3,3,3,3,4,5,6,7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7] -; SSE-NEXT: pand %xmm0, %xmm2 -; SSE-NEXT: por %xmm5, %xmm2 -; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0] +; SSE-NEXT: pandn %xmm4, %xmm2 +; SSE-NEXT: por %xmm3, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,0,65535,65535,0] +; SSE-NEXT: movdqa %xmm0, %xmm4 +; SSE-NEXT: pand %xmm3, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,2,3,3] +; SSE-NEXT: pandn %xmm5, %xmm3 +; SSE-NEXT: por %xmm4, %xmm3 +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,65535,65535,0,65535,65535,0,65535] +; SSE-NEXT: pand %xmm4, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] -; SSE-NEXT: pand %xmm5, %xmm1 -; SSE-NEXT: pandn %xmm6, %xmm5 -; SSE-NEXT: por %xmm1, %xmm5 -; SSE-NEXT: pand %xmm0, %xmm5 -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,4,4,4] -; SSE-NEXT: pandn %xmm1, %xmm0 -; SSE-NEXT: por %xmm5, %xmm0 -; SSE-NEXT: movdqa %xmm0, 16(%rcx) -; SSE-NEXT: movdqa %xmm2, 32(%rcx) -; SSE-NEXT: movdqa %xmm3, (%rcx) +; SSE-NEXT: pandn %xmm1, %xmm4 +; SSE-NEXT: por %xmm0, %xmm4 +; SSE-NEXT: movdqa %xmm4, 16(%rcx) +; SSE-NEXT: movdqa %xmm3, 
32(%rcx) +; SSE-NEXT: movdqa %xmm2, (%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf8: @@ -308,207 +294,99 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>* %in.vecptr2, <48 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf16: ; SSE: # %bb.0: -; SSE-NEXT: movdqa (%rdi), %xmm4 -; SSE-NEXT: movdqa 16(%rdi), %xmm7 -; SSE-NEXT: movdqa (%rsi), %xmm3 -; SSE-NEXT: movdqa 16(%rsi), %xmm5 -; SSE-NEXT: movdqa (%rdx), %xmm9 -; SSE-NEXT: movdqa 16(%rdx), %xmm10 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,3,3,3] -; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,0,65535,65535,0,65535,65535] -; SSE-NEXT: movdqa %xmm14, %xmm0 -; SSE-NEXT: pandn %xmm2, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[1,1,2,2] -; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm5[3,3,3,3,4,5,6,7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7] -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,3,2,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,7] -; SSE-NEXT: pand %xmm14, %xmm5 -; SSE-NEXT: por %xmm0, %xmm5 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,0,0,0] -; SSE-NEXT: movdqa %xmm14, %xmm0 -; SSE-NEXT: pandn %xmm2, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,4,5] -; SSE-NEXT: pand %xmm14, %xmm2 -; SSE-NEXT: por %xmm0, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,3,3,3] -; SSE-NEXT: movdqa %xmm14, %xmm7 -; SSE-NEXT: pandn %xmm0, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm4[1,1,2,2] -; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm3[3,3,3,3,4,5,6,7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,3,2,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7] -; SSE-NEXT: pand %xmm14, %xmm3 -; SSE-NEXT: por %xmm7, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm9[0,0,0,0] -; SSE-NEXT: movdqa %xmm14, %xmm6 -; SSE-NEXT: pandn %xmm7, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,4,5] -; SSE-NEXT: pand %xmm14, %xmm4 -; SSE-NEXT: por %xmm6, %xmm4 -; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,0] -; SSE-NEXT: movdqa %xmm6, %xmm7 -; SSE-NEXT: pandn %xmm8, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,2,2] -; SSE-NEXT: pand %xmm6, %xmm0 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: pand %xmm14, %xmm0 -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm11[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm14, %xmm1 -; SSE-NEXT: pandn %xmm7, %xmm1 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,2,2] -; SSE-NEXT: pand %xmm6, %xmm0 -; SSE-NEXT: pandn %xmm12, %xmm6 +; SSE-NEXT: movdqa (%rsi), %xmm10 +; SSE-NEXT: movdqa 16(%rsi), %xmm3 +; SSE-NEXT: movdqa (%rdx), %xmm8 +; SSE-NEXT: movdqa 16(%rdx), %xmm7 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,0,65535,65535,0] +; SSE-NEXT: movdqa %xmm3, %xmm5 +; SSE-NEXT: pand %xmm1, %xmm5 +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,2,3,3] +; SSE-NEXT: movdqa %xmm1, %xmm11 +; SSE-NEXT: pandn %xmm6, %xmm11 +; SSE-NEXT: por %xmm5, 
%xmm11 +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,0,65535] +; SSE-NEXT: movdqa %xmm10, %xmm0 +; SSE-NEXT: pand %xmm5, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm7[1,1,2,2] +; SSE-NEXT: movdqa %xmm5, %xmm6 +; SSE-NEXT: pandn %xmm9, %xmm6 ; SSE-NEXT: por %xmm0, %xmm6 -; SSE-NEXT: pand %xmm14, %xmm6 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,4,4,4,4] -; SSE-NEXT: pandn %xmm0, %xmm14 -; SSE-NEXT: por %xmm6, %xmm14 -; SSE-NEXT: movdqa %xmm14, 16(%rcx) -; SSE-NEXT: movdqa %xmm1, 64(%rcx) -; SSE-NEXT: movdqa %xmm4, (%rcx) -; SSE-NEXT: movdqa %xmm3, 32(%rcx) -; SSE-NEXT: movdqa %xmm2, 48(%rcx) -; SSE-NEXT: movdqa %xmm5, 80(%rcx) +; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm3, %xmm4 +; SSE-NEXT: pand %xmm0, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm7[0,0,0,0] +; SSE-NEXT: movdqa %xmm0, %xmm7 +; SSE-NEXT: pandn %xmm9, %xmm7 +; SSE-NEXT: por %xmm4, %xmm7 +; SSE-NEXT: movdqa %xmm10, %xmm4 +; SSE-NEXT: pand %xmm1, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[2,2,3,3] +; SSE-NEXT: pandn %xmm2, %xmm1 +; SSE-NEXT: por %xmm4, %xmm1 +; SSE-NEXT: pand %xmm5, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[1,1,2,2] +; SSE-NEXT: pandn %xmm2, %xmm5 +; SSE-NEXT: por %xmm3, %xmm5 +; SSE-NEXT: pand %xmm0, %xmm10 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[0,0,0,0] +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: por %xmm10, %xmm0 +; SSE-NEXT: movdqa %xmm0, (%rcx) +; SSE-NEXT: movdqa %xmm5, 16(%rcx) +; SSE-NEXT: movdqa %xmm1, 32(%rcx) +; SSE-NEXT: movdqa %xmm7, 48(%rcx) +; SSE-NEXT: movdqa %xmm6, 64(%rcx) +; SSE-NEXT: movdqa %xmm11, 80(%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf16: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rdi), %xmm0 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,2] -; AVX1-NEXT: vmovdqa (%rsi), %xmm3 -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm4 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm5 = xmm3[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3,4],xmm5[5],xmm2[6,7] -; AVX1-NEXT: vmovdqa (%rdx), %xmm5 -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm6 -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm7[0],xmm2[1,2],xmm7[3],xmm2[4,5],xmm7[6],xmm2[7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3] -; AVX1-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,0,1,2,3,u,u,4,5,6,7,u,u,8,9] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm9 = xmm2[0],xmm7[1,2],xmm2[3],xmm7[4,5],xmm2[6],xmm7[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] -; AVX1-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[4,5,6,7,u,u,8,9,10,11,u,u,12,13,14,15] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[3,3,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm10 = xmm7[0,1],xmm2[2],xmm7[3,4],xmm2[5],xmm7[6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7] -; AVX1-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,5,10,11,u,u,8,9,14,15,u,u,12,13] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1,2],xmm2[3],xmm7[4,5],xmm2[6],xmm7[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6],xmm1[7] -; 
AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[0,0,0,0] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7] -; AVX1-NEXT: vmovdqa %xmm0, (%rcx) -; AVX1-NEXT: vmovdqa %xmm1, 64(%rcx) -; AVX1-NEXT: vmovdqa %xmm2, 80(%rcx) -; AVX1-NEXT: vmovdqa %xmm10, 32(%rcx) -; AVX1-NEXT: vmovdqa %xmm9, 48(%rcx) -; AVX1-NEXT: vmovdqa %xmm8, 16(%rcx) +; AVX1-NEXT: vmovdqa (%rsi), %xmm0 +; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 +; AVX1-NEXT: vmovdqa (%rdx), %xmm2 +; AVX1-NEXT: vmovdqa 16(%rdx), %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm1[0],xmm4[1],xmm1[2,3],xmm4[4],xmm1[5,6],xmm4[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[1,1,2,2] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm0[1,2],xmm5[3],xmm0[4,5],xmm5[6],xmm0[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[1,1,2,2] +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm1[1,2],xmm6[3],xmm1[4,5],xmm6[6],xmm1[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[0,0,0,0] +; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3,4],xmm7[5],xmm0[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3,4],xmm3[5],xmm1[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6],xmm2[7] +; AVX1-NEXT: vmovdqa %xmm0, 32(%rcx) +; AVX1-NEXT: vmovdqa %xmm1, 48(%rcx) +; AVX1-NEXT: vmovdqa %xmm7, (%rcx) +; AVX1-NEXT: vmovdqa %xmm6, 16(%rcx) +; AVX1-NEXT: vmovdqa %xmm5, 64(%rcx) +; AVX1-NEXT: vmovdqa %xmm4, 80(%rcx) ; AVX1-NEXT: retq ; -; AVX2-SLOW-LABEL: vf16: -; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm0 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm1 -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm2 -; AVX2-SLOW-NEXT: vmovdqa 16(%rdi), %xmm3 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2] -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm5 -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[3,3,3,3,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11] -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = -; AVX2-SLOW-NEXT: vpermd %ymm1, %ymm4, %ymm4 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,2,2] -; AVX2-SLOW-NEXT: vmovdqa 16(%rdx), %xmm6 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13] -; AVX2-SLOW-NEXT: 
vinserti128 $1, %xmm3, %ymm4, %ymm3 -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u> -; AVX2-SLOW-NEXT: vpermd %ymm1, %ymm4, %ymm1 -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> -; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm0 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = -; AVX2-SLOW-NEXT: vpermd (%rdi), %ymm1, %ymm1 -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm1, %ymm0 -; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx) -; AVX2-SLOW-NEXT: vmovdqa %ymm3, 64(%rcx) -; AVX2-SLOW-NEXT: vmovdqa %ymm2, (%rcx) -; AVX2-SLOW-NEXT: vzeroupper -; AVX2-SLOW-NEXT: retq -; -; AVX2-FAST-LABEL: vf16: -; AVX2-FAST: # %bb.0: -; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm0 -; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm1 -; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm2 -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,6,7,u,u,u,u,8,9,u,u,u,u] -; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm4 -; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm5 -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[1,1,2,2] -; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2],xmm6[3,4],xmm3[5],xmm6[6,7] -; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11] -; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = -; AVX2-FAST-NEXT: vpermd %ymm1, %ymm3, %ymm3 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] -; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2 -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[1,1,2,2] -; AVX2-FAST-NEXT: vmovdqa 16(%rdx), %xmm6 -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2] -; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2,3],xmm3[4],xmm7[5,6],xmm3[7] -; AVX2-FAST-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13] -; AVX2-FAST-NEXT: vinserti128 $1, %xmm5, %ymm3, %ymm3 -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm5 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm3, %ymm5, %ymm3 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = <2,u,3,3,u,4,4,u> -; AVX2-FAST-NEXT: vpermd %ymm1, %ymm5, %ymm1 -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21] -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = -; AVX2-FAST-NEXT: vpermd (%rdi), %ymm1, %ymm1 -; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx) -; AVX2-FAST-NEXT: vmovdqa %ymm3, 64(%rcx) -; AVX2-FAST-NEXT: vmovdqa %ymm2, (%rcx) -; AVX2-FAST-NEXT: 
vzeroupper -; AVX2-FAST-NEXT: retq +; AVX2-LABEL: vf16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rsi), %ymm0 +; AVX2-NEXT: vmovdqa (%rdx), %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = +; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm2 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] +; AVX2-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm2 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = <2,u,3,3,u,4,4,u> +; AVX2-NEXT: vpermd %ymm1, %ymm3, %ymm3 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255] +; AVX2-NEXT: vpblendvb %ymm4, %ymm0, %ymm3, %ymm3 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = <5,5,u,6,6,u,7,7> +; AVX2-NEXT: vpermd %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0] +; AVX2-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vmovdqa %ymm0, 64(%rcx) +; AVX2-NEXT: vmovdqa %ymm3, 32(%rcx) +; AVX2-NEXT: vmovdqa %ymm2, (%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq ; ; AVX512-LABEL: vf16: ; AVX512: # %bb.0: @@ -540,447 +418,196 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>* %in.vecptr2, <96 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf32: ; SSE: # %bb.0: -; SSE-NEXT: movdqa 16(%rdi), %xmm5 -; SSE-NEXT: movdqa 32(%rdi), %xmm4 -; SSE-NEXT: movdqa 48(%rdi), %xmm15 -; SSE-NEXT: movdqa 16(%rsi), %xmm8 -; SSE-NEXT: movdqa 32(%rsi), %xmm7 +; SSE-NEXT: movdqa (%rsi), %xmm11 +; SSE-NEXT: movdqa 16(%rsi), %xmm12 +; SSE-NEXT: movdqa 32(%rsi), %xmm13 +; SSE-NEXT: movdqa 48(%rsi), %xmm15 +; SSE-NEXT: movaps 16(%rdx), %xmm0 +; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa 32(%rdx), %xmm0 +; SSE-NEXT: movdqa 48(%rdx), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,0] +; SSE-NEXT: movdqa %xmm15, %xmm3 +; SSE-NEXT: pand %xmm2, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,2,3,3] +; SSE-NEXT: movdqa %xmm2, %xmm5 +; SSE-NEXT: pandn %xmm4, %xmm5 +; SSE-NEXT: por %xmm3, %xmm5 +; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,0,65535] +; SSE-NEXT: movdqa %xmm13, %xmm3 +; SSE-NEXT: pand %xmm5, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2] +; SSE-NEXT: movdqa %xmm5, %xmm7 +; SSE-NEXT: pandn %xmm4, %xmm7 +; SSE-NEXT: por %xmm3, %xmm7 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 48(%rsi), %xmm13 -; SSE-NEXT: movdqa 16(%rdx), %xmm6 -; SSE-NEXT: movdqa 32(%rdx), %xmm9 -; SSE-NEXT: movdqa 48(%rdx), %xmm12 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[3,3,3,3] -; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535] -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,0,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm12, %xmm3 +; SSE-NEXT: pand %xmm7, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] +; SSE-NEXT: movdqa %xmm7, %xmm14 +; SSE-NEXT: pandn %xmm1, %xmm14 +; SSE-NEXT: por %xmm3, %xmm14 +; SSE-NEXT: movdqa %xmm11, %xmm1 +; SSE-NEXT: pand %xmm2, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,2,3,3] +; SSE-NEXT: movdqa %xmm2, %xmm10 +; SSE-NEXT: pandn %xmm4, %xmm10 +; SSE-NEXT: por %xmm1, %xmm10 ; SSE-NEXT: movdqa %xmm15, %xmm1 -; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = 
xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3] -; SSE-NEXT: movdqa %xmm13, %xmm3 -; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,3,2,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7] -; SSE-NEXT: pand %xmm0, %xmm3 -; SSE-NEXT: por %xmm2, %xmm3 -; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[0,0,0,0] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: pandn %xmm2, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,5] -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: por %xmm3, %xmm1 -; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3] -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: movdqa %xmm4, %xmm1 -; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3] -; SSE-NEXT: movdqa %xmm7, %xmm3 -; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,3,2,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm3[0,1,2,3,5,5,6,7] -; SSE-NEXT: pand %xmm0, %xmm11 -; SSE-NEXT: por %xmm2, %xmm11 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,0,0,0] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: pandn %xmm2, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,7,5,4,5] -; SSE-NEXT: pand %xmm0, %xmm10 -; SSE-NEXT: por %xmm3, %xmm10 -; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3] -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: movdqa %xmm8, %xmm3 -; SSE-NEXT: movdqa %xmm8, %xmm1 -; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,3,2,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,5,5,6,7] -; SSE-NEXT: pand %xmm0, %xmm7 -; SSE-NEXT: por %xmm2, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,0,0,0] -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: movdqa %xmm5, %xmm1 -; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,7,5,4,5] -; SSE-NEXT: pand %xmm0, %xmm6 -; SSE-NEXT: por %xmm2, %xmm6 -; SSE-NEXT: movdqa (%rdi), %xmm9 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[3,3,3,3] -; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: pandn %xmm2, %xmm1 -; SSE-NEXT: movdqa (%rsi), %xmm14 -; SSE-NEXT: movdqa (%rdx), %xmm8 -; SSE-NEXT: movdqa %xmm14, %xmm2 -; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3] 
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,5,5,6,7] -; SSE-NEXT: pand %xmm0, %xmm5 -; SSE-NEXT: por %xmm1, %xmm5 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[0,0,0,0] -; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: pandn %xmm2, %xmm1 -; SSE-NEXT: movdqa %xmm9, %xmm2 -; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,7,5,4,5] -; SSE-NEXT: pand %xmm0, %xmm4 +; SSE-NEXT: pand %xmm5, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,2,2] +; SSE-NEXT: movdqa %xmm5, %xmm9 +; SSE-NEXT: pandn %xmm6, %xmm9 +; SSE-NEXT: por %xmm1, %xmm9 +; SSE-NEXT: movdqa %xmm13, %xmm1 +; SSE-NEXT: pand %xmm7, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] +; SSE-NEXT: movdqa %xmm7, %xmm8 +; SSE-NEXT: pandn %xmm0, %xmm8 +; SSE-NEXT: por %xmm1, %xmm8 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3] +; SSE-NEXT: movdqa %xmm2, %xmm4 +; SSE-NEXT: pandn %xmm1, %xmm4 +; SSE-NEXT: movdqa %xmm12, %xmm1 +; SSE-NEXT: pand %xmm2, %xmm1 ; SSE-NEXT: por %xmm1, %xmm4 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[1,1,2,2] -; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,0,65535,65535,0,65535,65535,0] -; SSE-NEXT: movdqa %xmm15, %xmm1 -; SSE-NEXT: pandn %xmm2, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[1,1,2,2] -; SSE-NEXT: pand %xmm15, %xmm2 -; SSE-NEXT: por %xmm1, %xmm2 -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm13[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm0, %xmm12 -; SSE-NEXT: pandn %xmm1, %xmm12 -; SSE-NEXT: pand %xmm0, %xmm2 -; SSE-NEXT: por %xmm2, %xmm12 -; SSE-NEXT: pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload -; SSE-NEXT: # xmm1 = mem[1,1,2,2] -; SSE-NEXT: movdqa %xmm15, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload -; SSE-NEXT: # xmm1 = mem[1,1,2,2] -; SSE-NEXT: pand %xmm15, %xmm1 -; SSE-NEXT: por %xmm2, %xmm1 -; SSE-NEXT: pshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; SSE-NEXT: # xmm2 = mem[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm0, %xmm13 -; SSE-NEXT: pandn %xmm2, %xmm13 -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: por %xmm1, %xmm13 -; SSE-NEXT: pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload -; SSE-NEXT: # xmm1 = mem[1,1,2,2] -; SSE-NEXT: movdqa %xmm15, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload -; SSE-NEXT: # xmm1 = mem[1,1,2,2] -; SSE-NEXT: pand %xmm15, %xmm1 -; SSE-NEXT: por %xmm2, %xmm1 -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: pandn %xmm3, %xmm2 -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: por %xmm1, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[1,1,2,2] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[1,1,2,2] -; SSE-NEXT: pand %xmm15, %xmm3 -; SSE-NEXT: pandn %xmm1, %xmm15 -; SSE-NEXT: por %xmm3, %xmm15 -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[3,3,3,3,4,5,6,7] -; SSE-NEXT: pand %xmm0, %xmm15 -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] -; SSE-NEXT: pandn %xmm1, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = 
xmm0[1,1,2,2] +; SSE-NEXT: movdqa %xmm5, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 +; SSE-NEXT: movdqa %xmm11, %xmm3 +; SSE-NEXT: pand %xmm5, %xmm3 +; SSE-NEXT: por %xmm3, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0] +; SSE-NEXT: movdqa %xmm7, %xmm0 +; SSE-NEXT: pandn %xmm3, %xmm0 +; SSE-NEXT: pand %xmm7, %xmm15 ; SSE-NEXT: por %xmm15, %xmm0 -; SSE-NEXT: movdqa %xmm0, 16(%rcx) -; SSE-NEXT: movdqa %xmm2, 64(%rcx) -; SSE-NEXT: movdqa %xmm13, 112(%rcx) -; SSE-NEXT: movdqa %xmm12, 160(%rcx) -; SSE-NEXT: movdqa %xmm4, (%rcx) -; SSE-NEXT: movdqa %xmm5, 32(%rcx) -; SSE-NEXT: movdqa %xmm6, 48(%rcx) -; SSE-NEXT: movdqa %xmm7, 80(%rcx) -; SSE-NEXT: movdqa %xmm10, 96(%rcx) -; SSE-NEXT: movdqa %xmm11, 128(%rcx) +; SSE-NEXT: movdqa (%rdx), %xmm3 +; SSE-NEXT: pand %xmm2, %xmm13 +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,2,3,3] +; SSE-NEXT: pandn %xmm6, %xmm2 +; SSE-NEXT: por %xmm13, %xmm2 +; SSE-NEXT: pand %xmm5, %xmm12 +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2] +; SSE-NEXT: pandn %xmm6, %xmm5 +; SSE-NEXT: por %xmm12, %xmm5 +; SSE-NEXT: pand %xmm7, %xmm11 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] +; SSE-NEXT: pandn %xmm3, %xmm7 +; SSE-NEXT: por %xmm11, %xmm7 +; SSE-NEXT: movdqa %xmm7, (%rcx) +; SSE-NEXT: movdqa %xmm5, 16(%rcx) +; SSE-NEXT: movdqa %xmm2, 32(%rcx) +; SSE-NEXT: movdqa %xmm0, 48(%rcx) +; SSE-NEXT: movdqa %xmm1, 64(%rcx) +; SSE-NEXT: movdqa %xmm4, 80(%rcx) +; SSE-NEXT: movdqa %xmm8, 96(%rcx) +; SSE-NEXT: movdqa %xmm9, 112(%rcx) +; SSE-NEXT: movdqa %xmm10, 128(%rcx) +; SSE-NEXT: movdqa %xmm14, 144(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 144(%rcx) +; SSE-NEXT: movaps %xmm0, 160(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 176(%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf32: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rdi), %xmm13 -; AVX1-NEXT: vmovdqa 32(%rdi), %xmm7 -; AVX1-NEXT: vmovdqa 48(%rdi), %xmm1 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,1,2,2] -; AVX1-NEXT: vmovdqa (%rsi), %xmm14 +; AVX1-NEXT: vmovdqa (%rsi), %xmm9 +; AVX1-NEXT: vmovdqa 16(%rsi), %xmm2 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm3 -; AVX1-NEXT: vmovdqa 48(%rsi), %xmm6 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm5 = xmm3[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7] -; AVX1-NEXT: vmovdqa (%rdx), %xmm15 -; AVX1-NEXT: vmovdqa 32(%rdx), %xmm5 -; AVX1-NEXT: vmovdqa 48(%rdx), %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm5[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7] -; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm13[1,1,2,2] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm14[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3,4],xmm4[5],xmm0[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm15[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7] -; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm6[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = 
xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7] -; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1],xmm4[2,3],xmm0[4],xmm4[5,6],xmm0[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm6[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm12 = xmm0[0,1],xmm4[2],xmm0[3,4],xmm4[5],xmm0[6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,2,3,6,7,4,5,6,7,4,5,8,9] -; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm11 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,5,6,7,4,5,8,9,10,11,10,11,12,13,14,15] -; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm7[3,3,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm0[0,1],xmm6[2],xmm0[3,4],xmm6[5],xmm0[6,7] -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm0 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11] -; AVX1-NEXT: vpshufb %xmm10, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa 48(%rsi), %xmm4 +; AVX1-NEXT: vmovdqa (%rdx), %xmm5 +; AVX1-NEXT: vmovdqa 16(%rdx), %xmm6 +; AVX1-NEXT: vmovdqa 32(%rdx), %xmm7 +; AVX1-NEXT: vmovdqa 48(%rdx), %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0] +; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm7[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm10 = xmm9[0],xmm1[1],xmm9[2,3],xmm1[4],xmm9[5,6],xmm1[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm11 = xmm4[0],xmm1[1],xmm4[2,3],xmm1[4],xmm4[5,6],xmm1[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] +; AVX1-NEXT: vpblendw {{.*#+}} xmm12 = xmm0[0],xmm3[1,2],xmm0[3],xmm3[4,5],xmm0[6],xmm3[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm7[1,1,2,2] +; AVX1-NEXT: vpblendw {{.*#+}} xmm13 = xmm1[0],xmm4[1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,0,0,0] +; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm3[0,1],xmm7[2],xmm3[3,4],xmm7[5],xmm3[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm6[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3],xmm0[4],xmm2[5,6],xmm0[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[1,1,2,2] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm9[1,2],xmm1[3],xmm9[4,5],xmm1[6],xmm9[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,0,0] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2,3],xmm6[4],xmm3[5,6],xmm6[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[1,1,2,2] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0],xmm2[1,2],xmm6[3],xmm2[4,5],xmm6[6],xmm2[7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,0,0,0] -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3,4],xmm5[5],xmm3[6,7] -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm5 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] -; AVX1-NEXT: 
vpshufb %xmm2, %xmm8, %xmm8 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[0,0,1,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5],xmm9[6],xmm8[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7] -; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm13[3,3,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7] -; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,2,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1,2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3],xmm2[4],xmm5[5,6],xmm2[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3] -; AVX1-NEXT: vpshufb %xmm10, %xmm2, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm15[0,0,0,0] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3,4],xmm5[5],xmm2[6,7] -; AVX1-NEXT: vmovdqa %xmm2, (%rcx) -; AVX1-NEXT: vmovdqa %xmm0, 64(%rcx) -; AVX1-NEXT: vmovdqa %xmm4, 80(%rcx) -; AVX1-NEXT: vmovdqa %xmm1, 32(%rcx) -; AVX1-NEXT: vmovdqa %xmm8, 48(%rcx) -; AVX1-NEXT: vmovdqa %xmm3, 96(%rcx) -; AVX1-NEXT: vmovdqa %xmm6, 128(%rcx) -; AVX1-NEXT: vmovdqa %xmm11, 144(%rcx) +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm9[0,1],xmm5[2],xmm9[3,4],xmm5[5],xmm9[6,7] +; AVX1-NEXT: vmovdqa %xmm5, (%rcx) +; AVX1-NEXT: vmovdqa %xmm2, 16(%rcx) +; AVX1-NEXT: vmovdqa %xmm3, 32(%rcx) +; AVX1-NEXT: vmovdqa %xmm4, 48(%rcx) +; AVX1-NEXT: vmovdqa %xmm1, 64(%rcx) +; AVX1-NEXT: vmovdqa %xmm0, 80(%rcx) +; AVX1-NEXT: vmovdqa %xmm7, 96(%rcx) +; AVX1-NEXT: vmovdqa %xmm13, 112(%rcx) ; AVX1-NEXT: vmovdqa %xmm12, 160(%rcx) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 176(%rcx) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 16(%rcx) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 112(%rcx) +; AVX1-NEXT: vmovdqa %xmm11, 176(%rcx) +; AVX1-NEXT: vmovdqa %xmm10, 128(%rcx) +; AVX1-NEXT: vmovdqa %xmm8, 144(%rcx) ; AVX1-NEXT: retq ; -; AVX2-SLOW-LABEL: vf32: -; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm9 -; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %ymm3 -; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %ymm11 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm10 -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm5 -; AVX2-SLOW-NEXT: vmovdqa 16(%rdi), %xmm6 -; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm4 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[1,1,2,2] -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm0 -; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %xmm1 -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[3,3,3,3,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1],xmm2[2],xmm7[3,4],xmm2[5],xmm7[6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = 
xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11] -; AVX2-SLOW-NEXT: vpshufb %xmm12, %xmm0, %xmm0 -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = -; AVX2-SLOW-NEXT: vpermd %ymm10, %ymm13, %ymm7 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm7, %ymm8 -; AVX2-SLOW-NEXT: vmovdqa 48(%rdi), %xmm0 -; AVX2-SLOW-NEXT: vmovdqa 48(%rdx), %xmm7 -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm14 = [8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13] -; AVX2-SLOW-NEXT: vpshufb %xmm14, %xmm2, %xmm2 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3],xmm0[4],xmm7[5,6],xmm0[7] -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = -; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm3, %ymm7 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm7, %ymm15 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm4[1,1,2,2] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm1[3,3,3,3,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2],xmm0[3,4],xmm7[5],xmm0[6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] -; AVX2-SLOW-NEXT: vpshufb %xmm12, %xmm1, %xmm1 -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX2-SLOW-NEXT: vpermd %ymm11, %ymm13, %ymm1 -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm1, %ymm0 -; AVX2-SLOW-NEXT: vmovdqa 16(%rdx), %xmm1 -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] -; AVX2-SLOW-NEXT: vpshufb %xmm14, %xmm4, %xmm4 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3],xmm6[4],xmm1[5,6],xmm6[7] -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1 -; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm9, %ymm2 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm1, %ymm2, %ymm1 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21] -; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm3, %ymm3 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u> -; AVX2-SLOW-NEXT: vpermd %ymm11, %ymm4, %ymm6 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm3, %ymm6, %ymm3 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = -; AVX2-SLOW-NEXT: vpermd 32(%rdi), %ymm6, %ymm11 -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm3, %ymm11, %ymm3 -; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm9, %ymm2 -; AVX2-SLOW-NEXT: vpermd %ymm10, %ymm4, %ymm4 -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm2, %ymm4, %ymm2 -; AVX2-SLOW-NEXT: vpermd (%rdi), %ymm6, %ymm4 -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2 -; AVX2-SLOW-NEXT: vmovdqa %ymm2, 32(%rcx) -; AVX2-SLOW-NEXT: vmovdqa %ymm3, 128(%rcx) -; AVX2-SLOW-NEXT: 
vmovdqa %ymm1, 64(%rcx) -; AVX2-SLOW-NEXT: vmovdqa %ymm0, 96(%rcx) -; AVX2-SLOW-NEXT: vmovdqa %ymm15, 160(%rcx) -; AVX2-SLOW-NEXT: vmovdqa %ymm8, (%rcx) -; AVX2-SLOW-NEXT: vzeroupper -; AVX2-SLOW-NEXT: retq -; -; AVX2-FAST-LABEL: vf32: -; AVX2-FAST: # %bb.0: -; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm10 -; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %ymm3 -; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %ymm12 -; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm11 -; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm5 -; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %xmm4 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm9 = -; AVX2-FAST-NEXT: vpshufb %xmm9, %xmm5, %xmm7 -; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm6 -; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %xmm1 -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,2] -; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2],xmm2[3,4],xmm7[5],xmm2[6,7] -; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm13 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11] -; AVX2-FAST-NEXT: vpshufb %xmm13, %xmm0, %xmm0 -; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm14 = -; AVX2-FAST-NEXT: vpermd %ymm11, %ymm14, %ymm7 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm7, %ymm0 -; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-NEXT: vmovdqa 48(%rdi), %xmm0 -; AVX2-FAST-NEXT: vmovdqa 48(%rdx), %xmm7 -; AVX2-FAST-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7] -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm15 = [8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13] -; AVX2-FAST-NEXT: vpshufb %xmm15, %xmm2, %xmm2 -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2] -; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3],xmm0[4],xmm7[5,6],xmm0[7] -; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = -; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm3, %ymm7 -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm7, %ymm8 -; AVX2-FAST-NEXT: vpshufb %xmm9, %xmm4, %xmm0 -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,2,2] -; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm7[0,1],xmm0[2],xmm7[3,4],xmm0[5],xmm7[6,7] -; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; AVX2-FAST-NEXT: vpshufb %xmm13, %xmm1, %xmm1 -; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX2-FAST-NEXT: vpermd %ymm12, %ymm14, %ymm1 -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-NEXT: vmovdqa 16(%rdx), %xmm1 -; AVX2-FAST-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] -; AVX2-FAST-NEXT: vpshufb %xmm15, %xmm4, %xmm4 -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2] -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] -; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3],xmm6[4],xmm1[5,6],xmm6[7] -; AVX2-FAST-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1 -; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm10, %ymm2 -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm1, %ymm2, %ymm1 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = 
[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21] -; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm3, %ymm3 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u> -; AVX2-FAST-NEXT: vpermd %ymm12, %ymm4, %ymm6 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> -; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm3, %ymm6, %ymm3 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = -; AVX2-FAST-NEXT: vpermd 32(%rdi), %ymm6, %ymm9 -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm3, %ymm9, %ymm3 -; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm10, %ymm2 -; AVX2-FAST-NEXT: vpermd %ymm11, %ymm4, %ymm4 -; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm2, %ymm4, %ymm2 -; AVX2-FAST-NEXT: vpermd (%rdi), %ymm6, %ymm4 -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2 -; AVX2-FAST-NEXT: vmovdqa %ymm2, 32(%rcx) -; AVX2-FAST-NEXT: vmovdqa %ymm3, 128(%rcx) -; AVX2-FAST-NEXT: vmovdqa %ymm1, 64(%rcx) -; AVX2-FAST-NEXT: vmovdqa %ymm0, 96(%rcx) -; AVX2-FAST-NEXT: vmovdqa %ymm8, 160(%rcx) -; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-NEXT: vmovaps %ymm0, (%rcx) -; AVX2-FAST-NEXT: vzeroupper -; AVX2-FAST-NEXT: retq +; AVX2-LABEL: vf32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rsi), %ymm0 +; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1 +; AVX2-NEXT: vmovdqa (%rdx), %ymm2 +; AVX2-NEXT: vmovdqa 32(%rdx), %ymm3 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = +; AVX2-NEXT: vpermd %ymm2, %ymm4, %ymm5 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] +; AVX2-NEXT: vpblendvb %ymm6, %ymm0, %ymm5, %ymm5 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = <5,5,u,6,6,u,7,7> +; AVX2-NEXT: vpermd %ymm3, %ymm7, %ymm8 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0] +; AVX2-NEXT: vpblendvb %ymm9, %ymm1, %ymm8, %ymm8 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = <2,u,3,3,u,4,4,u> +; AVX2-NEXT: vpermd %ymm3, %ymm10, %ymm11 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255] +; AVX2-NEXT: vpblendvb %ymm12, %ymm0, %ymm11, %ymm11 +; AVX2-NEXT: vpermd %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vpblendvb %ymm6, %ymm1, %ymm3, %ymm3 +; AVX2-NEXT: vpermd %ymm2, %ymm7, %ymm4 +; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm4, %ymm0 +; AVX2-NEXT: vpermd %ymm2, %ymm10, %ymm2 +; AVX2-NEXT: vpblendvb %ymm12, %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx) +; AVX2-NEXT: vmovdqa %ymm0, 64(%rcx) +; AVX2-NEXT: vmovdqa %ymm3, 96(%rcx) +; AVX2-NEXT: vmovdqa %ymm11, 128(%rcx) +; AVX2-NEXT: vmovdqa %ymm8, 160(%rcx) +; AVX2-NEXT: vmovdqa %ymm5, (%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq ; ; AVX512-LABEL: vf32: ; AVX512: # %bb.0: -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm1 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,32,u,1,33,u,2,34,u,3,35,u,4,36,u,5,37,u,6,38,u,7,39,u,8,40,u,9,41,u,10,42> +; AVX512-NEXT: vmovdqu64 (%rsi), %zmm0 +; AVX512-NEXT: vmovdqu64 (%rdx), %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,32,3,4,33,6,7,34,9,10,35,12,13,36,15,16,37,18,19,38,21,22,39,24,25,40,27,28,41,30,31] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [42,1,2,43,4,5,44,7,8,45,10,11,46,13,14,47,16,17,48,19,20,49,22,23,50,25,26,51,28,29,52,31] ; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm3 
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,32,3,4,33,6,7,34,9,10,35,12,13,36,15,16,37,18,19,38,21,22,39,24,25,40,27,28,41,30,31] -; AVX512-NEXT: vpermi2w %zmm2, %zmm3, %zmm4 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,43,u,11,44,u,12,45,u,13,46,u,14,47,u,15,48,u,16,49,u,17,50,u,18,51,u,19,52,u,20,53> -; AVX512-NEXT: vpermi2w %zmm0, %zmm2, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,43,3,4,44,6,7,45,9,10,46,12,13,47,15,16,48,18,19,49,21,22,50,24,25,51,27,28,52,30,31] -; AVX512-NEXT: vpermi2w %zmm1, %zmm3, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <21,53,u,22,54,u,23,55,u,24,56,u,25,57,u,26,58,u,27,59,u,28,60,u,29,61,u,30,62,u,31,63> -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,54,3,4,55,6,7,56,9,10,57,12,13,58,15,16,59,18,19,60,21,22,61,24,25,62,27,28,63,30,31] -; AVX512-NEXT: vpermi2w %zmm0, %zmm3, %zmm1 -; AVX512-NEXT: vmovdqu64 %zmm1, 128(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm4, (%rcx) +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,53,2,3,54,5,6,55,8,9,56,11,12,57,14,15,58,17,18,59,20,21,60,23,24,61,26,27,62,29,30,63] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rcx) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <32 x i16>, <32 x i16>* %in.vecptr0, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll @@ -324,107 +324,107 @@ ; ; AVX1-LABEL: vf16: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rcx), %xmm5 -; AVX1-NEXT: vmovdqa 16(%rcx), %xmm8 -; AVX1-NEXT: vmovdqa (%rdx), %xmm6 -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm9 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3] +; AVX1-NEXT: vmovdqa (%rcx), %xmm8 +; AVX1-NEXT: vmovdqa 16(%rcx), %xmm5 +; AVX1-NEXT: vmovdqa (%rdx), %xmm9 +; AVX1-NEXT: vmovdqa 16(%rdx), %xmm6 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 -; AVX1-NEXT: vmovdqa (%rsi), %xmm7 -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm3 -; AVX1-NEXT: vmovdqa (%rdi), %xmm0 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm4 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm3 +; AVX1-NEXT: vmovdqa (%rsi), %xmm2 +; AVX1-NEXT: vmovdqa 16(%rsi), %xmm7 +; AVX1-NEXT: vmovdqa (%rdi), %xmm4 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm0 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm1[0],zero,xmm1[1],zero ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm10, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = 
xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] ; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm1[0,0,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm10, %ymm1 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm10, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm3[0],zero,xmm3[1],zero +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm10, %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7] +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] -; AVX1-NEXT: vmovaps %ymm2, 96(%r8) -; AVX1-NEXT: vmovaps %ymm0, (%r8) -; AVX1-NEXT: vmovaps %ymm1, 32(%r8) -; AVX1-NEXT: vmovaps %ymm11, 64(%r8) +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3 +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7] +; AVX1-NEXT: vmovaps %ymm2, (%r8) +; AVX1-NEXT: vmovaps %ymm0, 96(%r8) +; AVX1-NEXT: vmovaps %ymm1, 64(%r8) +; 
AVX1-NEXT: vmovaps %ymm11, 32(%r8) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: vf16: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovdqa (%rcx), %xmm5 -; AVX2-NEXT: vmovdqa 16(%rcx), %xmm8 -; AVX2-NEXT: vmovdqa (%rdx), %xmm6 -; AVX2-NEXT: vmovdqa 16(%rdx), %xmm9 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3] +; AVX2-NEXT: vmovdqa (%rcx), %xmm8 +; AVX2-NEXT: vmovdqa 16(%rcx), %xmm5 +; AVX2-NEXT: vmovdqa (%rdx), %xmm9 +; AVX2-NEXT: vmovdqa 16(%rdx), %xmm6 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 -; AVX2-NEXT: vmovdqa (%rsi), %xmm7 -; AVX2-NEXT: vmovdqa 16(%rsi), %xmm3 -; AVX2-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-NEXT: vmovdqa 16(%rdi), %xmm4 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm3 +; AVX2-NEXT: vmovdqa (%rsi), %xmm2 +; AVX2-NEXT: vmovdqa 16(%rsi), %xmm7 +; AVX2-NEXT: vmovdqa (%rdi), %xmm4 +; AVX2-NEXT: vmovdqa 16(%rdi), %xmm0 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm1[0],zero,xmm1[1],zero ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm10, %ymm1 -; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7] -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] ; AVX2-NEXT: vpshufd {{.*#+}} xmm10 = xmm1[0,0,1,1] ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm10, %ymm1 -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero -; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm10, %ymm2 -; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1] -; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3] +; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm3[0],zero,xmm3[1],zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm10, %ymm3 +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7] +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] +; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] ; 
AVX2-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm5, %ymm0 -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7] -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] -; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1] -; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2 -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1] ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] -; AVX2-NEXT: vmovdqa %ymm2, 96(%r8) -; AVX2-NEXT: vmovdqa %ymm0, (%r8) -; AVX2-NEXT: vmovdqa %ymm1, 32(%r8) -; AVX2-NEXT: vmovdqa %ymm11, 64(%r8) +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3 +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] +; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7] +; AVX2-NEXT: vmovdqa %ymm2, (%r8) +; AVX2-NEXT: vmovdqa %ymm0, 96(%r8) +; AVX2-NEXT: vmovdqa %ymm1, 64(%r8) +; AVX2-NEXT: vmovdqa %ymm11, 32(%r8) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -550,207 +550,207 @@ ; ; AVX1-LABEL: vf32: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rcx), %xmm15 -; AVX1-NEXT: vmovdqa 16(%rcx), %xmm12 -; AVX1-NEXT: vmovdqa 32(%rcx), %xmm11 -; AVX1-NEXT: vmovdqa 48(%rcx), %xmm2 -; AVX1-NEXT: vmovdqa (%rdx), %xmm6 -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm13 -; AVX1-NEXT: vmovdqa 32(%rdx), %xmm1 -; AVX1-NEXT: vmovdqa 48(%rdx), %xmm7 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm8 -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm14 -; AVX1-NEXT: vmovdqa 32(%rsi), %xmm3 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm5 -; AVX1-NEXT: vmovdqa 32(%rdi), %xmm4 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3] +; AVX1-NEXT: vmovdqa (%rcx), %xmm12 +; AVX1-NEXT: vmovdqa 16(%rcx), %xmm15 +; AVX1-NEXT: vmovdqa 32(%rcx), %xmm3 +; AVX1-NEXT: vmovdqa 48(%rcx), %xmm11 +; AVX1-NEXT: vmovdqa (%rdx), %xmm13 +; AVX1-NEXT: vmovdqa 16(%rdx), %xmm6 +; AVX1-NEXT: vmovdqa 32(%rdx), %xmm7 +; AVX1-NEXT: vmovdqa 48(%rdx), %xmm1 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm8 +; AVX1-NEXT: vmovdqa (%rsi), %xmm14 +; AVX1-NEXT: vmovdqa 48(%rsi), %xmm2 +; AVX1-NEXT: vmovdqa (%rdi), %xmm5 +; AVX1-NEXT: vmovdqa 48(%rdi), %xmm4 +; AVX1-NEXT: 
vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm9, %ymm0 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7] ; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3] ; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,0,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm8, %ymm8 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm0[0],zero,xmm0[1],zero ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm10, %ymm0 -; AVX1-NEXT: vmovdqa 48(%rsi), %xmm10 +; AVX1-NEXT: vmovdqa 32(%rsi), %xmm10 ; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7] -; AVX1-NEXT: vmovdqa 48(%rdi), %xmm0 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3] +; AVX1-NEXT: vmovdqa 32(%rdi), %xmm0 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,0,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm8, %ymm1 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[0,0,1,1] +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 -; AVX1-NEXT: vmovdqa (%rsi), %xmm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7] -; AVX1-NEXT: vmovdqa (%rdi), %xmm1 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3] +; AVX1-NEXT: vinsertf128 
$1, %xmm1, %ymm2, %ymm1 +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX1-NEXT: vmovdqa 16(%rsi), %xmm4 +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3] +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,0,1,1] +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm7, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3],ymm3[4],ymm0[5],ymm3[6],ymm0[7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm7, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7] +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,0,1,1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm6, %ymm2 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7] 
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,0,1,1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3] ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7] -; AVX1-NEXT: vmovaps %ymm3, 96(%r8) -; AVX1-NEXT: vmovaps %ymm1, (%r8) -; AVX1-NEXT: vmovaps %ymm0, 32(%r8) -; AVX1-NEXT: vmovaps %ymm2, 192(%r8) -; AVX1-NEXT: vmovaps %ymm11, 224(%r8) -; AVX1-NEXT: vmovaps %ymm8, 128(%r8) -; AVX1-NEXT: vmovaps %ymm9, 160(%r8) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7] +; AVX1-NEXT: vmovaps %ymm2, (%r8) +; AVX1-NEXT: vmovaps %ymm1, 96(%r8) ; AVX1-NEXT: vmovaps %ymm0, 64(%r8) +; AVX1-NEXT: vmovaps %ymm3, 160(%r8) +; AVX1-NEXT: vmovaps %ymm11, 128(%r8) +; AVX1-NEXT: vmovaps %ymm8, 224(%r8) +; AVX1-NEXT: vmovaps %ymm9, 192(%r8) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm0, 32(%r8) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: vf32: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovdqa (%rcx), %xmm15 -; AVX2-NEXT: vmovdqa 16(%rcx), %xmm12 -; AVX2-NEXT: vmovdqa 32(%rcx), %xmm11 -; AVX2-NEXT: vmovdqa 48(%rcx), %xmm2 -; AVX2-NEXT: vmovdqa (%rdx), %xmm6 -; AVX2-NEXT: vmovdqa 16(%rdx), %xmm13 -; AVX2-NEXT: vmovdqa 32(%rdx), %xmm1 -; AVX2-NEXT: vmovdqa 48(%rdx), %xmm7 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3] -; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1] -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm8 -; AVX2-NEXT: vmovdqa 16(%rsi), %xmm14 -; AVX2-NEXT: vmovdqa 32(%rsi), %xmm3 -; AVX2-NEXT: vmovdqa 16(%rdi), %xmm5 -; AVX2-NEXT: vmovdqa 32(%rdi), %xmm4 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3] +; AVX2-NEXT: vmovdqa (%rcx), %xmm12 +; AVX2-NEXT: vmovdqa 16(%rcx), %xmm15 +; AVX2-NEXT: vmovdqa 32(%rcx), %xmm3 +; AVX2-NEXT: vmovdqa 48(%rcx), %xmm11 +; AVX2-NEXT: vmovdqa (%rdx), %xmm13 +; AVX2-NEXT: vmovdqa 16(%rdx), %xmm6 +; AVX2-NEXT: vmovdqa 32(%rdx), %xmm7 +; AVX2-NEXT: vmovdqa 48(%rdx), %xmm1 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] +; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1] +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm8 +; AVX2-NEXT: vmovdqa (%rsi), %xmm14 +; AVX2-NEXT: vmovdqa 48(%rsi), %xmm2 +; AVX2-NEXT: vmovdqa (%rdi), %xmm5 +; AVX2-NEXT: vmovdqa 
48(%rdi), %xmm4 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm9, %ymm0 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7] ; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3] ; AVX2-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,0,1,1] ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm8 -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm0[0],zero,xmm0[1],zero ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm10, %ymm0 -; AVX2-NEXT: vmovdqa 48(%rsi), %xmm10 +; AVX2-NEXT: vmovdqa 32(%rsi), %xmm10 ; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7] -; AVX2-NEXT: vmovdqa 48(%rdi), %xmm0 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3] +; AVX2-NEXT: vmovdqa 32(%rdi), %xmm0 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7] ; AVX2-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,0,1,1] ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm8, %ymm1 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 -; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7] -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7] -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[0,0,1,1] +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] +; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1] ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1 -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] -; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 -; AVX2-NEXT: vmovdqa (%rsi), %xmm4 -; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7] -; AVX2-NEXT: vmovdqa (%rdi), %xmm1 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = 
xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3] +; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX2-NEXT: vmovdqa 16(%rsi), %xmm4 +; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] +; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7] ; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3] +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0 -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7] -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7] -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,0,1,1] +; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,1,1] ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0 -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm7, %ymm3 -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3],ymm3[4],ymm0[5],ymm3[6],ymm0[7] -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3] -; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[0,0,1,1] -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm6, %ymm3 -; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0 +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm7, %ymm2 +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7] +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7] +; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,0,1,1] +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm6, %ymm2 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm4, %ymm1 -; 
AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7] -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] -; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,0,1,1] -; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3] -; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 -; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,0,1,1] +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3] ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero ; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3] ; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4 -; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7] -; AVX2-NEXT: vmovdqa %ymm3, 96(%r8) -; AVX2-NEXT: vmovdqa %ymm1, (%r8) -; AVX2-NEXT: vmovdqa %ymm0, 32(%r8) -; AVX2-NEXT: vmovdqa %ymm2, 192(%r8) -; AVX2-NEXT: vmovdqa %ymm11, 224(%r8) -; AVX2-NEXT: vmovdqa %ymm8, 128(%r8) -; AVX2-NEXT: vmovdqa %ymm9, 160(%r8) +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7] +; AVX2-NEXT: vmovdqa %ymm2, (%r8) +; AVX2-NEXT: vmovdqa %ymm1, 96(%r8) +; AVX2-NEXT: vmovdqa %ymm0, 64(%r8) +; AVX2-NEXT: vmovdqa %ymm3, 160(%r8) +; AVX2-NEXT: vmovdqa %ymm11, 128(%r8) +; AVX2-NEXT: vmovdqa %ymm8, 224(%r8) +; AVX2-NEXT: vmovdqa %ymm9, 192(%r8) ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 64(%r8) +; AVX2-NEXT: vmovaps %ymm0, 32(%r8) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll @@ -126,47 +126,28 @@ define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.vecptr2, <4 x i16>* %in.vecptr3, <4 x i16>* %in.vecptr4, <20 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf4: ; SSE: # %bb.0: -; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero -; SSE-NEXT: movq {{.*#+}} xmm4 = mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero -; SSE-NEXT: movq {{.*#+}} xmm8 = mem[0],zero -; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,1,1] -; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,2,1] -; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,0,65535,65535,65535,0] -; SSE-NEXT: movdqa %xmm6, %xmm7 -; SSE-NEXT: pandn %xmm4, %xmm7 -; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,3,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,1,1] -; SSE-NEXT: pand %xmm6, %xmm4 -; SSE-NEXT: por %xmm7, %xmm4 -; 
SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,65535,65535] -; SSE-NEXT: pand %xmm7, %xmm4 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,1,0,1] -; SSE-NEXT: movdqa %xmm7, %xmm5 -; SSE-NEXT: pandn %xmm3, %xmm5 -; SSE-NEXT: por %xmm4, %xmm5 -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7] -; SSE-NEXT: pand %xmm6, %xmm3 -; SSE-NEXT: pandn %xmm2, %xmm6 -; SSE-NEXT: por %xmm3, %xmm6 -; SSE-NEXT: pand %xmm7, %xmm6 -; SSE-NEXT: pandn %xmm9, %xmm7 -; SSE-NEXT: por %xmm6, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] -; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE-NEXT: movq %xmm0, 32(%r9) -; SSE-NEXT: movdqa %xmm7, 16(%r9) -; SSE-NEXT: movdqa %xmm5, (%r9) +; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero +; SSE-NEXT: movdqa %xmm0, %xmm3 +; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0] +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,0,1] +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,0,65535,65535,65535] +; SSE-NEXT: movdqa %xmm3, %xmm5 +; SSE-NEXT: pand %xmm4, %xmm5 +; SSE-NEXT: pandn %xmm1, %xmm4 +; SSE-NEXT: por %xmm5, %xmm4 +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,65535,65535,0,65535] +; SSE-NEXT: pand %xmm5, %xmm3 +; SSE-NEXT: pandn %xmm1, %xmm5 +; SSE-NEXT: por %xmm3, %xmm5 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,65535,65535] +; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm1 +; SSE-NEXT: por %xmm0, %xmm1 +; SSE-NEXT: movq %xmm1, 32(%r9) +; SSE-NEXT: movdqa %xmm5, 16(%r9) +; SSE-NEXT: movdqa %xmm4, (%r9) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf4: @@ -290,286 +271,97 @@ define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.vecptr2, <8 x i16>* %in.vecptr3, <8 x i16>* %in.vecptr4, <40 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf8: ; SSE: # %bb.0: -; SSE-NEXT: movdqa (%rdi), %xmm11 -; SSE-NEXT: movdqa (%rsi), %xmm13 -; SSE-NEXT: movdqa (%rdx), %xmm4 -; SSE-NEXT: movdqa (%rcx), %xmm2 -; SSE-NEXT: movdqa (%r8), %xmm8 +; SSE-NEXT: movdqa (%rdx), %xmm1 +; SSE-NEXT: movdqa (%rcx), %xmm5 +; SSE-NEXT: movdqa (%r8), %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,0,65535,65535,65535] +; SSE-NEXT: movdqa %xmm1, %xmm4 +; SSE-NEXT: pand %xmm0, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm2[0,1,0,1] +; SSE-NEXT: pandn %xmm8, %xmm0 +; SSE-NEXT: por %xmm4, %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,65535,65535,0] +; SSE-NEXT: movdqa %xmm1, %xmm6 +; SSE-NEXT: pand %xmm4, %xmm6 +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,2,3] +; SSE-NEXT: pandn %xmm7, %xmm4 +; SSE-NEXT: por %xmm6, %xmm4 +; SSE-NEXT: movdqa {{.*#+}} xmm6 = [0,65535,65535,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm5, %xmm3 +; SSE-NEXT: pand %xmm6, %xmm3 +; SSE-NEXT: pandn %xmm7, %xmm6 +; SSE-NEXT: por %xmm3, %xmm6 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,65535,65535,0,65535] +; SSE-NEXT: pand %xmm3, %xmm5 +; SSE-NEXT: pandn %xmm8, %xmm3 +; SSE-NEXT: por %xmm5, %xmm3 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,0,65535,65535,65535,65535] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm5, %xmm3 -; SSE-NEXT: pandn %xmm8, 
%xmm5 -; SSE-NEXT: por %xmm3, %xmm5 -; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,0,65535,65535,65535,0] -; SSE-NEXT: movdqa %xmm3, %xmm6 -; SSE-NEXT: pandn %xmm5, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,2,2] -; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,0,65535] -; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm13[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm0, %xmm7 -; SSE-NEXT: pandn %xmm5, %xmm0 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: pand %xmm3, %xmm0 -; SSE-NEXT: por %xmm6, %xmm0 -; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,65535,65535] -; SSE-NEXT: pand %xmm7, %xmm0 -; SSE-NEXT: movdqa %xmm7, %xmm14 -; SSE-NEXT: pandn %xmm11, %xmm14 -; SSE-NEXT: por %xmm0, %xmm14 -; SSE-NEXT: movdqa %xmm13, %xmm0 -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm3, %xmm9 -; SSE-NEXT: pandn %xmm0, %xmm9 -; SSE-NEXT: movdqa %xmm2, %xmm10 -; SSE-NEXT: movdqa %xmm4, %xmm5 -; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm4[1,1,1,1] -; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,1,0,1] -; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7] -; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3] -; SSE-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,5,7] -; SSE-NEXT: pand %xmm3, %xmm1 -; SSE-NEXT: por %xmm9, %xmm1 -; SSE-NEXT: psrld $16, %xmm10 -; SSE-NEXT: movdqa %xmm7, %xmm6 -; SSE-NEXT: pandn %xmm10, %xmm6 -; SSE-NEXT: pand %xmm7, %xmm1 -; SSE-NEXT: por %xmm1, %xmm6 -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1] -; SSE-NEXT: movdqa %xmm3, %xmm4 -; SSE-NEXT: pandn %xmm1, %xmm4 -; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[0,1,3,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,1] -; SSE-NEXT: pand %xmm3, %xmm1 -; SSE-NEXT: por %xmm4, %xmm1 -; SSE-NEXT: pand %xmm7, %xmm1 -; SSE-NEXT: movdqa %xmm7, %xmm4 -; SSE-NEXT: pandn %xmm0, %xmm4 -; SSE-NEXT: por %xmm1, %xmm4 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,7,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm3, %xmm1 -; SSE-NEXT: pandn %xmm0, %xmm1 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,5,7,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3] -; SSE-NEXT: pand %xmm3, %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: pand %xmm7, %xmm0 -; SSE-NEXT: psrlq $48, %xmm13 -; SSE-NEXT: movdqa %xmm7, %xmm1 -; SSE-NEXT: pandn %xmm13, %xmm1 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,5,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,7] -; SSE-NEXT: pand %xmm3, %xmm2 -; SSE-NEXT: pandn %xmm0, %xmm3 -; SSE-NEXT: por %xmm2, %xmm3 -; SSE-NEXT: pand %xmm7, %xmm3 -; SSE-NEXT: pandn %xmm12, %xmm7 -; SSE-NEXT: por %xmm3, %xmm7 -; SSE-NEXT: movdqa %xmm7, 16(%r9) -; 
SSE-NEXT: movdqa %xmm1, 64(%r9) -; SSE-NEXT: movdqa %xmm4, (%r9) +; SSE-NEXT: pand %xmm5, %xmm1 +; SSE-NEXT: pandn %xmm2, %xmm5 +; SSE-NEXT: por %xmm1, %xmm5 +; SSE-NEXT: movdqa %xmm5, 32(%r9) +; SSE-NEXT: movdqa %xmm3, 16(%r9) ; SSE-NEXT: movdqa %xmm6, 48(%r9) -; SSE-NEXT: movdqa %xmm14, 32(%r9) +; SSE-NEXT: movdqa %xmm4, 64(%r9) +; SSE-NEXT: movdqa %xmm0, (%r9) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf8: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rdi), %xmm2 -; AVX1-NEXT: vmovdqa (%rsi), %xmm0 -; AVX1-NEXT: vmovdqa (%rdx), %xmm1 -; AVX1-NEXT: vmovdqa (%rcx), %xmm3 -; AVX1-NEXT: vmovdqa (%r8), %xmm4 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm5 = xmm3[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm4[3],xmm5[4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[1,1,2,2] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm0[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2,3,4,5],xmm6[6],xmm7[7] -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5,6],xmm5[7] -; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm5[0,1,2,3],xmm2[4],xmm5[5,6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,1,3,2,4,5,6,7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[2,3,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm9 = xmm6[0],xmm7[1],xmm6[2,3,4,5],xmm7[6],xmm6[7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,1,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,0,2,1] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[0,1,3,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,1,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5,6],xmm7[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm4[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm10 = xmm6[0,1,2,3],xmm7[4],xmm6[5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,2,2,3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[2,2,2,2,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,6,7] -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7] -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm7[1],xmm5[2,3,4,5],xmm7[6],xmm5[7] -; AVX1-NEXT: vmovdqa %xmm5, 16(%r9) -; AVX1-NEXT: vmovdqa %xmm10, (%r9) -; AVX1-NEXT: vmovdqa %xmm9, 48(%r9) -; AVX1-NEXT: vmovdqa %xmm8, 32(%r9) -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,3] -; 
AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6],xmm2[7] -; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7] +; AVX1-NEXT: vmovdqa (%rdx), %xmm0 +; AVX1-NEXT: vmovdqa (%rcx), %xmm1 +; AVX1-NEXT: vmovdqa (%r8), %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm3[4],xmm0[5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1],xmm1[2,3,4,5],xmm3[6],xmm1[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1,2,3,4],xmm5[5],xmm1[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7] +; AVX1-NEXT: vmovdqa %xmm2, 32(%r9) +; AVX1-NEXT: vmovdqa %xmm1, 48(%r9) +; AVX1-NEXT: vmovdqa %xmm3, 16(%r9) +; AVX1-NEXT: vmovdqa %xmm4, (%r9) +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2],xmm0[3,4,5,6],xmm5[7] ; AVX1-NEXT: vmovdqa %xmm0, 64(%r9) ; AVX1-NEXT: retq ; ; AVX2-SLOW-LABEL: vf8: ; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm3 -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm0 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm1 -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm2 -; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm4 -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5 -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = ymm5[6,7,u,u,u,u,10,11,u,u,8,9,u,u,u,u,22,23,u,u,u,u,26,27,u,u,24,25,u,u,u,u] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,0,1] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,6,7,10,11,u,u,8,9,u,u,8,9,12,13,u,u,22,23,26,27,u,u,24,25,u,u,24,25,28,29] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4],ymm6[5],ymm5[6,7],ymm6[8],ymm5[9,10],ymm6[11],ymm5[12],ymm6[13],ymm5[14,15] -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm6 -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[8,9,10,11,6,7,u,u,u,u,10,11,12,13,8,9,24,25,26,27,22,23,u,u,u,u,26,27,28,29,24,25] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,0,1] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4],ymm7[5,6,7,8,9,10],ymm6[11,12],ymm7[13,14,15] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255> -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5 -; AVX2-SLOW-NEXT: vpbroadcastq 8(%rdi), %ymm6 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5 -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm6 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm6[0,1,8,9,12,13],zero,zero,zero,zero,ymm6[2,3,18,19,18,19],zero,zero,zero,zero,ymm6[28,29,20,21,28,29],zero,zero -; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm8 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,2,0,2] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[0,1,8,9],zero,zero,zero,zero,zero,zero,ymm8[2,3,10,11],zero,zero,zero,zero,zero,zero,ymm8[20,21,28,29],zero,zero,zero,zero,zero,zero,ymm8[22,23] -; AVX2-SLOW-NEXT: vpor %ymm6, %ymm8, %ymm6 -; AVX2-SLOW-NEXT: vpbroadcastq (%r8), %ymm8 -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm6, %ymm8, %ymm6 -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = 
xmm3[0,1,2,3,4,7,6,6] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,6] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,3] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5,6],xmm3[7] -; AVX2-SLOW-NEXT: vpsrlq $48, %xmm0, %xmm0 -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7] +; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm0 +; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm1 +; AVX2-SLOW-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm2 +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 +; AVX2-SLOW-NEXT: vpbroadcastq (%r8), %ymm3 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2 +; AVX2-SLOW-NEXT: vpbroadcastd 12(%r8), %xmm3 +; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3,4,5,6],xmm3[7] ; AVX2-SLOW-NEXT: vmovdqa %xmm0, 64(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm6, (%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm5, 32(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm2, (%r9) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; -; AVX2-FAST-ALL-LABEL: vf8: -; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %xmm1 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %xmm0 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %xmm2 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %xmm3 -; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %xmm4 -; AVX2-FAST-ALL-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm5 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,2,0] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,ymm5[0,1,8,9,12,13],zero,zero,zero,zero,ymm5[2,3,18,19,18,19],zero,zero,zero,zero,ymm5[28,29,20,21,28,29],zero,zero -; AVX2-FAST-ALL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm6 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[0,1,8,9],zero,zero,zero,zero,zero,zero,ymm6[2,3,10,11],zero,zero,zero,zero,zero,zero,ymm6[20,21,28,29],zero,zero,zero,zero,zero,zero,ymm6[22,23] -; AVX2-FAST-ALL-NEXT: vpor %ymm5, %ymm6, %ymm5 -; AVX2-FAST-ALL-NEXT: vpbroadcastq (%r8), %ymm6 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5 -; AVX2-FAST-ALL-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm8 = <1,5,2,u,6,2,u,u> -; AVX2-FAST-ALL-NEXT: vpermd %ymm6, %ymm8, %ymm6 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm6[2,3,6,7,6,7],zero,zero,zero,zero,ymm6[8,9,16,17,18,19],zero,zero,zero,zero,ymm6[22,23,18,19,18,19],zero,zero -; AVX2-FAST-ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm8 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm9 = <1,5,2,6,2,6,3,u> -; AVX2-FAST-ALL-NEXT: vpermd %ymm8, %ymm9, %ymm8 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[2,3,6,7],zero,zero,zero,zero,zero,zero,ymm8[8,9,12,13],zero,zero,zero,zero,zero,zero,ymm8[18,19,22,23],zero,zero,zero,zero,zero,zero,ymm8[24,25] -; AVX2-FAST-ALL-NEXT: vpor 
%ymm6, %ymm8, %ymm6 -; AVX2-FAST-ALL-NEXT: vpbroadcastq 8(%rdi), %ymm8 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm6, %ymm8, %ymm6 -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,8,9,14,15,u,u,u,u,u,u,12,13] -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,10,11,u,u,u,u,u,u,12,13,14,15,u,u] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5,6],xmm1[7] -; AVX2-FAST-ALL-NEXT: vpsrlq $48, %xmm0, %xmm0 -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7] -; AVX2-FAST-ALL-NEXT: vmovdqa %xmm0, 64(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm6, 32(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm5, (%r9) -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: vf8: -; AVX2-FAST-PERLANE: # %bb.0: -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm2 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm1 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm3 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm4 -; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5 -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm6 = ymm5[6,7,u,u,u,u,10,11,u,u,8,9,u,u,u,u,22,23,u,u,u,u,26,27,u,u,24,25,u,u,u,u] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,0,1] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,6,7,10,11,u,u,8,9,u,u,8,9,12,13,u,u,22,23,26,27,u,u,24,25,u,u,24,25,28,29] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4],ymm6[5],ymm5[6,7],ymm6[8],ymm5[9,10],ymm6[11],ymm5[12],ymm6[13],ymm5[14,15] -; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6 -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[8,9,10,11,6,7,u,u,u,u,10,11,12,13,8,9,24,25,26,27,22,23,u,u,u,u,26,27,28,29,24,25] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,3,0,1] -; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4],ymm7[5,6,7,8,9,10],ymm6[11,12],ymm7[13,14,15] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255> -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5 -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq 8(%rdi), %ymm6 -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5 -; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm6 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm6[0,1,8,9,12,13],zero,zero,zero,zero,ymm6[2,3,18,19,18,19],zero,zero,zero,zero,ymm6[28,29,20,21,28,29],zero,zero -; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm8 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,2,0,2] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[0,1,8,9],zero,zero,zero,zero,zero,zero,ymm8[2,3,10,11],zero,zero,zero,zero,zero,zero,ymm8[20,21,28,29],zero,zero,zero,zero,zero,zero,ymm8[22,23] -; AVX2-FAST-PERLANE-NEXT: vpor %ymm6, %ymm8, %ymm6 -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq (%r8), %ymm8 -; 
AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm6, %ymm8, %ymm6 -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,8,9,14,15,u,u,u,u,u,u,12,13] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[8,9,10,11,u,u,u,u,u,u,12,13,14,15,u,u] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6],xmm2[7] -; AVX2-FAST-PERLANE-NEXT: vpsrlq $48, %xmm0, %xmm0 -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, 64(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, (%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, 32(%r9) -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-FAST-LABEL: vf8: +; AVX2-FAST: # %bb.0: +; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm0 +; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm1 +; AVX2-FAST-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm2 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 +; AVX2-FAST-NEXT: vpbroadcastq (%r8), %ymm3 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2 +; AVX2-FAST-NEXT: vpbroadcastd 12(%r8), %xmm3 +; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3,4,5,6],xmm3[7] +; AVX2-FAST-NEXT: vmovdqa %xmm0, 64(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm2, (%r9) +; AVX2-FAST-NEXT: vzeroupper +; AVX2-FAST-NEXT: retq ; ; AVX512-LABEL: vf8: ; AVX512: # %bb.0: @@ -608,604 +400,178 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>* %in.vecptr2, <16 x i16>* %in.vecptr3, <16 x i16>* %in.vecptr4, <80 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf16: ; SSE: # %bb.0: -; SSE-NEXT: movdqa 16(%rdi), %xmm10 -; SSE-NEXT: movaps (%rsi), %xmm0 -; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 16(%rsi), %xmm1 -; SSE-NEXT: movdqa (%rdx), %xmm14 -; SSE-NEXT: movdqa 16(%rdx), %xmm4 -; SSE-NEXT: movdqa (%rcx), %xmm15 -; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 16(%rcx), %xmm11 -; SSE-NEXT: movdqa (%r8), %xmm9 -; SSE-NEXT: movdqa 16(%r8), %xmm13 -; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,65535,65535,65535,65535] -; SSE-NEXT: movdqa %xmm3, %xmm0 -; SSE-NEXT: pandn %xmm13, %xmm0 -; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm11[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm3, %xmm5 -; SSE-NEXT: por %xmm0, %xmm5 -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,0,65535,65535,65535,0] -; SSE-NEXT: movdqa %xmm2, %xmm12 -; SSE-NEXT: pandn %xmm5, %xmm12 -; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,2,2] -; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,0,65535] -; SSE-NEXT: movdqa %xmm0, %xmm5 -; SSE-NEXT: pandn %xmm8, %xmm5 -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm1[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,4,4,4,4] -; 
SSE-NEXT: pand %xmm0, %xmm7 -; SSE-NEXT: por %xmm5, %xmm7 -; SSE-NEXT: pand %xmm2, %xmm7 -; SSE-NEXT: por %xmm12, %xmm7 -; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,0,65535,65535,65535] -; SSE-NEXT: pand %xmm5, %xmm7 -; SSE-NEXT: movdqa %xmm5, %xmm6 -; SSE-NEXT: pandn %xmm10, %xmm6 -; SSE-NEXT: por %xmm7, %xmm6 -; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm15[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm3, %xmm7 -; SSE-NEXT: pandn %xmm9, %xmm3 -; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: por %xmm7, %xmm3 -; SSE-NEXT: movdqa %xmm2, %xmm7 -; SSE-NEXT: pandn %xmm3, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm14[1,1,2,2] -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm0, %xmm6 -; SSE-NEXT: pandn %xmm3, %xmm0 -; SSE-NEXT: movdqa (%rdi), %xmm12 -; SSE-NEXT: por %xmm6, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: pand %xmm5, %xmm0 -; SSE-NEXT: movdqa %xmm5, %xmm3 -; SSE-NEXT: pandn %xmm12, %xmm3 -; SSE-NEXT: por %xmm0, %xmm3 -; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm1, %xmm0 -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm2, %xmm6 -; SSE-NEXT: pandn %xmm0, %xmm6 +; SSE-NEXT: movdqa (%rdx), %xmm13 +; SSE-NEXT: movdqa 16(%rdx), %xmm15 +; SSE-NEXT: movdqa (%rcx), %xmm10 +; SSE-NEXT: movdqa 16(%rcx), %xmm14 +; SSE-NEXT: movdqa (%r8), %xmm8 +; SSE-NEXT: movdqa 16(%r8), %xmm9 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,0,65535,65535,65535,65535,0] +; SSE-NEXT: movdqa %xmm15, %xmm3 +; SSE-NEXT: pand %xmm1, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm9[2,3,2,3] +; SSE-NEXT: movdqa %xmm1, %xmm11 +; SSE-NEXT: pandn %xmm4, %xmm11 +; SSE-NEXT: por %xmm3, %xmm11 +; SSE-NEXT: movdqa {{.*#+}} xmm6 = [0,65535,65535,65535,65535,0,65535,65535] ; SSE-NEXT: movdqa %xmm13, %xmm3 -; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm10[4],xmm3[5],xmm10[5],xmm3[6],xmm10[6],xmm3[7],xmm10[7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm6, %xmm0 -; SSE-NEXT: movdqa %xmm11, %xmm6 -; SSE-NEXT: psrld $16, %xmm6 -; SSE-NEXT: movdqa %xmm5, %xmm15 -; SSE-NEXT: pandn %xmm6, %xmm15 +; SSE-NEXT: pand %xmm6, %xmm3 +; SSE-NEXT: movdqa %xmm6, %xmm12 +; SSE-NEXT: pandn %xmm4, %xmm12 +; SSE-NEXT: por %xmm3, %xmm12 +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,65535,65535,0,65535] +; SSE-NEXT: movdqa %xmm10, %xmm5 +; SSE-NEXT: pand %xmm4, %xmm5 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,1,0,1] +; SSE-NEXT: movdqa %xmm4, %xmm3 +; SSE-NEXT: pandn %xmm2, %xmm3 +; SSE-NEXT: por %xmm5, %xmm3 +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,0,65535,65535,65535] +; SSE-NEXT: movdqa %xmm15, %xmm0 ; SSE-NEXT: pand %xmm5, %xmm0 -; SSE-NEXT: por %xmm0, %xmm15 -; SSE-NEXT: movdqa %xmm8, %xmm0 -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm2, %xmm6 -; SSE-NEXT: pandn 
%xmm0, %xmm6 -; SSE-NEXT: movdqa %xmm9, %xmm8 -; SSE-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7] -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,7] -; SSE-NEXT: pand %xmm2, %xmm7 -; SSE-NEXT: por %xmm6, %xmm7 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload +; SSE-NEXT: movdqa %xmm5, %xmm7 +; SSE-NEXT: pandn %xmm2, %xmm7 +; SSE-NEXT: por %xmm0, %xmm7 ; SSE-NEXT: movdqa %xmm13, %xmm0 -; SSE-NEXT: psrld $16, %xmm0 -; SSE-NEXT: movdqa %xmm5, %xmm9 -; SSE-NEXT: pandn %xmm0, %xmm9 -; SSE-NEXT: pand %xmm5, %xmm7 -; SSE-NEXT: por %xmm7, %xmm9 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,7,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: pandn %xmm0, %xmm3 -; SSE-NEXT: movdqa %xmm4, %xmm0 -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm3, %xmm0 -; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3] -; SSE-NEXT: psrlq $48, %xmm1 -; SSE-NEXT: movdqa %xmm5, %xmm3 -; SSE-NEXT: pandn %xmm1, %xmm3 -; SSE-NEXT: pand %xmm5, %xmm0 -; SSE-NEXT: por %xmm0, %xmm3 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,4,5,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm1 -; SSE-NEXT: pandn %xmm0, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1] -; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3] -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3] -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm11[1,1,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,7] -; SSE-NEXT: pand %xmm2, %xmm7 -; SSE-NEXT: por %xmm1, %xmm7 -; SSE-NEXT: movdqa %xmm5, %xmm11 -; SSE-NEXT: pandn %xmm0, %xmm11 -; SSE-NEXT: pand %xmm5, %xmm7 -; SSE-NEXT: por %xmm7, %xmm11 -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1] -; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: pandn %xmm0, %xmm4 -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[0,1,3,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm4, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,1,0,1] -; SSE-NEXT: movdqa %xmm5, %xmm10 -; SSE-NEXT: pandn %xmm7, %xmm10 -; SSE-NEXT: pand %xmm5, %xmm0 -; SSE-NEXT: por %xmm0, %xmm10 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,7,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm7 -; SSE-NEXT: pandn %xmm0, %xmm7 +; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[2,3,2,3] +; SSE-NEXT: pandn %xmm2, %xmm1 +; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: movdqa %xmm14, %xmm0 -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,6] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3] -; SSE-NEXT: pand %xmm2, %xmm1 -; SSE-NEXT: por %xmm7, %xmm1 -; SSE-NEXT: pand %xmm5, %xmm1 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; 
SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3] -; SSE-NEXT: psrlq $48, %xmm0 -; SSE-NEXT: movdqa %xmm0, %xmm4 -; SSE-NEXT: movdqa %xmm5, %xmm0 -; SSE-NEXT: pandn %xmm4, %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm12[0,1,2,3,4,5,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm7 -; SSE-NEXT: pandn %xmm1, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[1,1,1,1] -; SSE-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3] -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm13[1,1,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,5,7] -; SSE-NEXT: pand %xmm2, %xmm6 -; SSE-NEXT: por %xmm7, %xmm6 -; SSE-NEXT: movdqa %xmm5, %xmm7 -; SSE-NEXT: pandn %xmm1, %xmm7 -; SSE-NEXT: pand %xmm5, %xmm6 -; SSE-NEXT: por %xmm6, %xmm7 -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm12[0,1,3,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,1,1] -; SSE-NEXT: pand %xmm2, %xmm6 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: por %xmm6, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,0,1] -; SSE-NEXT: pand %xmm5, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm5 -; SSE-NEXT: por %xmm2, %xmm5 +; SSE-NEXT: pand %xmm6, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm6 +; SSE-NEXT: por %xmm0, %xmm6 +; SSE-NEXT: pand %xmm4, %xmm15 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,1,0,1] +; SSE-NEXT: pandn %xmm0, %xmm4 +; SSE-NEXT: por %xmm15, %xmm4 +; SSE-NEXT: pand %xmm5, %xmm13 +; SSE-NEXT: pandn %xmm0, %xmm5 +; SSE-NEXT: por %xmm13, %xmm5 +; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,65535,65535] +; SSE-NEXT: movdqa %xmm0, %xmm2 +; SSE-NEXT: pandn %xmm9, %xmm2 +; SSE-NEXT: pand %xmm0, %xmm14 +; SSE-NEXT: por %xmm2, %xmm14 +; SSE-NEXT: pand %xmm0, %xmm10 +; SSE-NEXT: pandn %xmm8, %xmm0 +; SSE-NEXT: por %xmm10, %xmm0 +; SSE-NEXT: movdqa %xmm0, 32(%r9) +; SSE-NEXT: movdqa %xmm14, 112(%r9) ; SSE-NEXT: movdqa %xmm5, (%r9) -; SSE-NEXT: movdqa %xmm7, 16(%r9) -; SSE-NEXT: movdqa %xmm0, 64(%r9) -; SSE-NEXT: movdqa %xmm10, 80(%r9) -; SSE-NEXT: movdqa %xmm11, 96(%r9) -; SSE-NEXT: movdqa %xmm3, 144(%r9) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 32(%r9) -; SSE-NEXT: movdqa %xmm9, 48(%r9) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 112(%r9) -; SSE-NEXT: movdqa %xmm15, 128(%r9) +; SSE-NEXT: movdqa %xmm4, 16(%r9) +; SSE-NEXT: movdqa %xmm6, 48(%r9) +; SSE-NEXT: movdqa %xmm1, 64(%r9) +; SSE-NEXT: movdqa %xmm7, 80(%r9) +; SSE-NEXT: movdqa %xmm3, 96(%r9) +; SSE-NEXT: movdqa %xmm12, 128(%r9) +; SSE-NEXT: movdqa %xmm11, 144(%r9) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf16: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%r8), %xmm12 -; AVX1-NEXT: vmovdqa 16(%r8), %xmm11 -; AVX1-NEXT: vmovdqa (%rcx), %xmm7 -; AVX1-NEXT: vmovdqa 16(%rcx), %xmm14 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm7[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3],xmm0[4,5,6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7] -; 
AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps {{.*#+}} ymm9 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535] -; AVX1-NEXT: vandnps %ymm0, %ymm9, %ymm0 -; AVX1-NEXT: vmovdqa (%rdx), %xmm4 -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm5 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm4[1,1,2,2] -; AVX1-NEXT: vmovdqa (%rsi), %xmm15 -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm6 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm15[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 -; AVX1-NEXT: vandps %ymm2, %ymm9, %ymm2 -; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm13 -; AVX1-NEXT: vextractf128 $1, %ymm13, %xmm2 -; AVX1-NEXT: vmovdqa (%rdi), %xmm0 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm10 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7] -; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm10[4],xmm2[5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 -; AVX1-NEXT: vandnps %ymm2, %ymm9, %ymm2 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm14[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm11[3],xmm3[4,5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm14[0],xmm11[0],xmm14[1],xmm11[1],xmm14[2],xmm11[2],xmm14[3],xmm11[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vandps %ymm1, %ymm9, %ymm1 -; AVX1-NEXT: vorps %ymm2, %ymm1, %ymm1 -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7] -; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm5[0],zero,xmm5[1],zero -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5,6,7] -; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,2,4,5,6,7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = 
xmm2[2,1,3,3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 -; AVX1-NEXT: vandnps %ymm1, %ymm9, %ymm1 -; AVX1-NEXT: vandps %ymm2, %ymm9, %ymm2 -; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm2 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm6[1,0,2,3,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] -; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3,4,5],xmm1[6],xmm3[7] -; AVX1-NEXT: vpsrlq $48, %xmm15, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm13[0,1,2,3],xmm0[4],xmm13[5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[0,1,3,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,1,1,1] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 -; AVX1-NEXT: vandnps %ymm2, %ymm9, %ymm2 -; AVX1-NEXT: vandps %ymm0, %ymm9, %ymm0 -; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm12[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5],xmm3[6],xmm2[7] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5,6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] -; AVX1-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,6,7,u,u,u,u,8,9,4,5,10,11,u,u] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5 -; AVX1-NEXT: vandnps %ymm3, %ymm9, %ymm3 -; AVX1-NEXT: vandps %ymm5, %ymm9, %ymm5 -; AVX1-NEXT: vorps %ymm3, %ymm5, %ymm3 -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5 -; AVX1-NEXT: vpshufhw {{.*#+}} xmm6 = xmm14[0,1,2,3,7,6,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,2,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3,4,5],xmm6[6],xmm5[7] -; AVX1-NEXT: vpsrld $16, %xmm14, %xmm6 -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5,6,7] -; AVX1-NEXT: vmovdqa %xmm3, 128(%r9) -; AVX1-NEXT: vmovdqa %xmm5, 144(%r9) -; AVX1-NEXT: vmovdqa %xmm0, (%r9) -; AVX1-NEXT: vmovdqa %xmm2, 16(%r9) -; AVX1-NEXT: vmovdqa %xmm8, 64(%r9) +; AVX1-NEXT: 
vmovdqa (%rdx), %xmm0 +; AVX1-NEXT: vmovdqa 16(%rdx), %xmm1 +; AVX1-NEXT: vmovdqa (%r8), %xmm2 +; AVX1-NEXT: vmovdqa 16(%r8), %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm1[0,1],xmm4[2],xmm1[3,4,5,6],xmm4[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm9 = xmm4[0],xmm0[1,2,3,4],xmm4[5],xmm0[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm10 = xmm1[0],xmm6[1],xmm1[2,3,4,5],xmm6[6],xmm1[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm11 = xmm0[0,1,2,3],xmm6[4],xmm0[5,6,7] +; AVX1-NEXT: vmovdqa (%rcx), %xmm5 +; AVX1-NEXT: vmovdqa 16(%rcx), %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[2,3,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm12 = xmm7[0],xmm4[1,2,3,4],xmm7[5],xmm4[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4],xmm1[5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2],xmm0[3,4,5,6],xmm7[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm5[0],xmm6[1],xmm5[2,3,4,5],xmm6[6],xmm5[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3],xmm5[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5,6,7] +; AVX1-NEXT: vmovdqa %xmm3, 112(%r9) +; AVX1-NEXT: vmovdqa %xmm2, 32(%r9) +; AVX1-NEXT: vmovdqa %xmm6, 96(%r9) +; AVX1-NEXT: vmovdqa %xmm0, 64(%r9) ; AVX1-NEXT: vmovdqa %xmm1, 80(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 96(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 112(%r9) -; AVX1-NEXT: vmovdqa %xmm4, 32(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 48(%r9) -; AVX1-NEXT: vzeroupper +; AVX1-NEXT: vmovdqa %xmm12, 48(%r9) +; AVX1-NEXT: vmovdqa %xmm11, (%r9) +; AVX1-NEXT: vmovdqa %xmm10, 16(%r9) +; AVX1-NEXT: vmovdqa %xmm9, 128(%r9) +; AVX1-NEXT: vmovdqa %xmm8, 144(%r9) ; AVX1-NEXT: retq ; ; AVX2-SLOW-LABEL: vf16: ; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm12 -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm11 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm3 -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm4 +; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm0 +; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm1 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm2 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm5 -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm6 -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1] -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm0 -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm1 -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255> -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm1, %ymm7, %ymm1 -; AVX2-SLOW-NEXT: vpbroadcastq (%r8), %ymm8 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm1, %ymm8, %ymm9 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[1,2,2,2] -; AVX2-SLOW-NEXT: vpshufb 
{{.*#+}} xmm0 = xmm0[6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = mem[2,1,2,3] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm6[3,3,3,3,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2],xmm1[3],xmm5[4],xmm1[5,6],xmm5[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm0, %ymm1, %ymm0 -; AVX2-SLOW-NEXT: vpbroadcastq 8(%rdi), %ymm1 -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm8 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm2[0,1,0,1,4,5,4,5] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[0,1,2,1,4,5,6,5] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm11[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0,1],ymm1[2],ymm5[3],ymm1[4],ymm5[5,6],ymm1[7],ymm5[8,9],ymm1[10],ymm5[11],ymm1[12],ymm5[13,14],ymm1[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm0, %ymm1, %ymm0 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm0 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[2,3,2,3,6,7,6,7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[2,3,2,3,6,7,6,7] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2],ymm1[3],ymm5[4,5],ymm1[6],ymm5[7,8],ymm1[9],ymm5[10],ymm1[11],ymm5[12,13],ymm1[14],ymm5[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm5 = ymm3[3,2,3,3,7,6,7,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm6 = ymm11[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[0,2,3,2,4,6,7,6] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3],ymm6[4],ymm5[5,6],ymm6[7],ymm5[8,9],ymm6[10],ymm5[11],ymm6[12],ymm5[13,14],ymm6[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm1, %ymm5, %ymm1 -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm5 = ymm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm1, %ymm5, %ymm1 -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[3,0,3,0,7,4,7,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm12[1,1,2,2] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm2[0,1,1,1] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = 
[255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm1, %ymm3, %ymm3 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10],ymm4[11],ymm2[12],ymm4[13],ymm2[14,15] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm3, %ymm2, %ymm2 -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm11[0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm2, %ymm3, %ymm2 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm0, %ymm2, %ymm2 +; AVX2-SLOW-NEXT: vpbroadcastq 16(%r8), %ymm4 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm1, %ymm4, %ymm1 +; AVX2-SLOW-NEXT: vpbroadcastq 24(%r8), %ymm4 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0] +; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm4 +; AVX2-SLOW-NEXT: vpbroadcastq (%r8), %ymm5 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm6, %ymm0, %ymm5, %ymm0 +; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm4, 128(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm1, 96(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm2, 64(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm1, 128(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm0, 96(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm8, 32(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm9, (%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm3, 32(%r9) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; -; AVX2-FAST-ALL-LABEL: vf16: -; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %ymm12 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %ymm11 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %ymm3 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %ymm4 -; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %ymm2 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %xmm5 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %xmm6 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %xmm6 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %xmm0 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm10 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255> -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm10, %ymm7, %ymm1, %ymm1 -; AVX2-FAST-ALL-NEXT: vpbroadcastq (%r8), %ymm8 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm1, %ymm8, %ymm9 -; AVX2-FAST-ALL-NEXT: 
vpshufd {{.*#+}} xmm1 = xmm6[1,2,2,2] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2],xmm1[3],xmm5[4,5],xmm1[6],xmm5[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm5 = mem[2,1,2,3] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,6,7,u,u,10,11,u,u,u,u,8,9] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm0 = xmm5[0,1],xmm0[2],xmm5[3],xmm0[4],xmm5[5,6],xmm0[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm10, %ymm1, %ymm0, %ymm0 -; AVX2-FAST-ALL-NEXT: vpbroadcastq 8(%rdi), %ymm1 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm8 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm2[0,1,0,1,4,5,4,5] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[0,1,2,1,4,5,6,5] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm5 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,18,19,u,u,20,21,u,u,24,25,24,25,u,u] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0,1],ymm1[2],ymm5[3],ymm1[4],ymm5[5,6],ymm1[7],ymm5[8,9],ymm1[10],ymm5[11],ymm1[12],ymm5[13,14],ymm1[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm10, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-ALL-NEXT: vpbroadcastq {{.*#+}} ymm1 = [25769803781,25769803781,25769803781,25769803781] -; AVX2-FAST-ALL-NEXT: vpermd %ymm3, %ymm1, %ymm1 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[2,3,2,3,6,7,6,7] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[2,3,2,3,6,7,6,7] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2],ymm1[3],ymm5[4,5],ymm1[6],ymm5[7,8],ymm1[9],ymm5[10],ymm1[11],ymm5[12,13],ymm1[14],ymm5[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm5 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,u,u,30,31,u,u,u,u,28,29] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm6 = ymm3[3,2,3,3,7,6,7,7] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3],ymm5[4],ymm6[5,6],ymm5[7],ymm6[8,9],ymm5[10],ymm6[11],ymm5[12],ymm6[13,14],ymm5[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm10, %ymm1, %ymm5, %ymm1 -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm5 = ymm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm1, %ymm5, %ymm1 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[3,0,3,0,7,4,7,4] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm12[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} 
ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10],ymm4[11],ymm2[12],ymm4[13],ymm2[14,15] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm10, %ymm3, %ymm2, %ymm2 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm3 = ymm11[0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm2, %ymm3, %ymm2 -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm2, 64(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm1, 128(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm0, 96(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm8, 32(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm9, (%r9) -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: vf16: -; AVX2-FAST-PERLANE: # %bb.0: -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm12 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm11 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm3 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm4 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm2 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm5 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm6 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm6 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm0 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm10 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255> -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm7, %ymm1, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq (%r8), %ymm8 -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm1, %ymm8, %ymm9 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[1,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2],xmm1[3],xmm5[4,5],xmm1[6],xmm5[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = mem[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,6,7,u,u,10,11,u,u,u,u,8,9] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm5[0,1],xmm0[2],xmm5[3],xmm0[4],xmm5[5,6],xmm0[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm1, %ymm0, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq 8(%rdi), %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm8 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm2[0,1,0,1,4,5,4,5] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,u,u,24,25,20,21,u,u,24,25] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = 
ymm12[0,1,2,1,4,5,6,5] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm5 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,18,19,u,u,20,21,u,u,24,25,24,25,u,u] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0,1],ymm1[2],ymm5[3],ymm1[4],ymm5[5,6],ymm1[7],ymm5[8,9],ymm1[10],ymm5[11],ymm1[12],ymm5[13,14],ymm1[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm12[2,3,2,3,6,7,6,7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[2,3,2,3,6,7,6,7] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2],ymm1[3],ymm5[4,5],ymm1[6],ymm5[7,8],ymm1[9],ymm5[10],ymm1[11],ymm5[12,13],ymm1[14],ymm5[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm5 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,u,u,30,31,u,u,u,u,28,29] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm6 = ymm3[3,2,3,3,7,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3],ymm5[4],ymm6[5,6],ymm5[7],ymm6[8,9],ymm5[10],ymm6[11],ymm5[12],ymm6[13,14],ymm5[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm1, %ymm5, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm5 = ymm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm1, %ymm5, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[3,0,3,0,7,4,7,4] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm12[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10],ymm4[11],ymm2[12],ymm4[13],ymm2[14,15] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm3, %ymm2, %ymm2 -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm3 = ymm11[0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm2, %ymm3, %ymm2 -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 64(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 128(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 96(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, 32(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm9, (%r9) -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-FAST-LABEL: vf16: +; AVX2-FAST: # %bb.0: +; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm0 +; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm1 +; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm2 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm2[0,1,1,1] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm1, %ymm3, %ymm3 +; 
AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm0, %ymm2, %ymm2 +; AVX2-FAST-NEXT: vpbroadcastq 16(%r8), %ymm4 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm1, %ymm4, %ymm1 +; AVX2-FAST-NEXT: vpbroadcastq 24(%r8), %ymm4 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0] +; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm4 +; AVX2-FAST-NEXT: vpbroadcastq (%r8), %ymm5 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm0, %ymm5, %ymm0 +; AVX2-FAST-NEXT: vmovdqa %ymm0, (%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm4, 128(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm1, 96(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm2, 64(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm3, 32(%r9) +; AVX2-FAST-NEXT: vzeroupper +; AVX2-FAST-NEXT: retq ; ; AVX512-LABEL: vf16: ; AVX512: # %bb.0: -; AVX512-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512-NEXT: vmovdqa (%rsi), %ymm1 -; AVX512-NEXT: vmovdqa (%rdx), %ymm2 -; AVX512-NEXT: vmovdqa (%rcx), %ymm3 -; AVX512-NEXT: vmovdqa (%r8), %ymm4 -; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5 -; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm6 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,16,32,48,u,1,17,33,49,u,2,18,34,50,u,3,19,35,51,u,4,20,36,52,u,5,21,37,53,u,6,22> -; AVX512-NEXT: vpermi2w %zmm5, %zmm6, %zmm7 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,32,5,6,7,8,33,10,11,12,13,34,15,16,17,18,35,20,21,22,23,36,25,26,27,28,37,30,31] -; AVX512-NEXT: vpermi2w %zmm4, %zmm7, %zmm6 -; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm4, %zmm7 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = <6,22,38,55,u,7,23,39,56,u,8,24,40,57,u,9,25,41,58,u,10,26,42,59,u,11,27,43,60,u,12,28> -; AVX512-NEXT: vpermi2w %zmm7, %zmm5, %zmm8 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,39,5,6,7,8,40,10,11,12,13,41,15,16,17,18,42,20,21,22,23,43,25,26,27,28,44,30,31] -; AVX512-NEXT: vpermi2w %zmm1, %zmm8, %zmm5 -; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [14,0,29,13,31,15,0,30,14,0,29,13,31,15,0,30] -; AVX512-NEXT: # ymm7 = mem[0,1,0,1] -; AVX512-NEXT: vpermi2w %ymm1, %ymm2, %ymm7 -; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [12,29,14,31,0,13,30,15,12,29,14,31,0,13,30,15] -; AVX512-NEXT: # ymm1 = mem[0,1,0,1] -; AVX512-NEXT: vpermi2w %ymm0, %ymm4, %ymm1 -; AVX512-NEXT: movw $12684, %ax # imm = 0x318C -; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: vmovdqu16 %ymm7, %ymm1 {%k1} -; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [0,1,2,3,29,5,6,7,8,30,10,11,12,13,31,15] -; AVX512-NEXT: vpermi2w %ymm3, %ymm1, %ymm0 -; AVX512-NEXT: vmovdqa %ymm0, 128(%r9) -; AVX512-NEXT: vmovdqu64 %zmm5, 64(%r9) -; AVX512-NEXT: vmovdqu64 %zmm6, (%r9) +; AVX512-NEXT: vmovdqa (%rdx), %ymm0 +; AVX512-NEXT: vmovdqa (%r8), %ymm1 +; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,32,5,6,7,8,33,10,11,12,13,34,15,16,17,18,35,20,21,22,23,36,25,26,27,28,37,30,31] +; AVX512-NEXT: vpermi2w %zmm1, %zmm2, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = 
[0,1,38,3,4,5,6,39,8,9,10,11,40,13,14,15,16,41,18,19,20,21,42,23,24,25,26,43,28,29,30,31] +; AVX512-NEXT: vpermi2w %zmm1, %zmm2, %zmm4 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [28,1,2,3,4,29,6,7,8,9,30,11,12,13,14,31] +; AVX512-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 +; AVX512-NEXT: vmovdqa %ymm2, 128(%r9) +; AVX512-NEXT: vmovdqu64 %zmm4, 64(%r9) +; AVX512-NEXT: vmovdqu64 %zmm3, (%r9) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <16 x i16>, <16 x i16>* %in.vecptr0, align 32 @@ -1229,1272 +595,353 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>* %in.vecptr2, <32 x i16>* %in.vecptr3, <32 x i16>* %in.vecptr4, <160 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf32: ; SSE: # %bb.0: -; SSE-NEXT: subq $328, %rsp # imm = 0x148 -; SSE-NEXT: movdqa (%rdi), %xmm10 -; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa (%rsi), %xmm5 -; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa (%rdx), %xmm14 -; SSE-NEXT: movdqa 16(%rdx), %xmm9 -; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa (%rcx), %xmm2 -; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 16(%rcx), %xmm8 +; SSE-NEXT: subq $72, %rsp +; SSE-NEXT: movdqa (%rdx), %xmm12 +; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa 32(%rdx), %xmm5 +; SSE-NEXT: movdqa 48(%rdx), %xmm4 +; SSE-NEXT: movdqa 32(%rcx), %xmm10 +; SSE-NEXT: movdqa 48(%rcx), %xmm8 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa (%r8), %xmm1 -; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 16(%r8), %xmm3 -; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,65535,65535,65535,65535] -; SSE-NEXT: movdqa %xmm4, %xmm0 -; SSE-NEXT: pandn %xmm1, %xmm0 -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm4, %xmm1 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,0,65535,65535,65535,0] -; SSE-NEXT: movdqa %xmm2, %xmm0 -; SSE-NEXT: pandn %xmm1, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm14[1,1,2,2] -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,0,65535] -; SSE-NEXT: movdqa %xmm1, %xmm7 -; SSE-NEXT: pandn %xmm6, %xmm7 -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm1, %xmm6 -; SSE-NEXT: por %xmm7, %xmm6 -; SSE-NEXT: pand %xmm2, %xmm6 -; SSE-NEXT: por %xmm0, %xmm6 -; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,65535,65535,0,65535,65535,65535] -; SSE-NEXT: pand %xmm12, %xmm6 -; SSE-NEXT: movdqa %xmm12, %xmm0 -; SSE-NEXT: pandn %xmm10, %xmm0 -; SSE-NEXT: por %xmm6, %xmm0 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa 32(%r8), %xmm9 +; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa 48(%r8), %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,65535,65535,0] ; SSE-NEXT: movdqa %xmm4, %xmm0 -; SSE-NEXT: pandn %xmm3, %xmm0 -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm4, %xmm6 -; SSE-NEXT: por %xmm0, %xmm6 -; SSE-NEXT: movdqa %xmm2, %xmm11 -; SSE-NEXT: pandn %xmm6, %xmm11 -; SSE-NEXT: pshufd {{.*#+}} xmm6 = 
xmm9[1,1,2,2] -; SSE-NEXT: movdqa %xmm1, %xmm7 -; SSE-NEXT: pandn %xmm6, %xmm7 -; SSE-NEXT: movdqa 16(%rsi), %xmm0 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm1, %xmm0 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm11, %xmm0 -; SSE-NEXT: movdqa 16(%rdi), %xmm5 +; SSE-NEXT: pand %xmm3, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3] +; SSE-NEXT: movdqa %xmm2, %xmm7 +; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa %xmm3, %xmm2 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm0, %xmm2 +; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa {{.*#+}} xmm6 = [0,65535,65535,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm5, %xmm0 +; SSE-NEXT: movdqa %xmm5, %xmm11 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: movdqa %xmm12, %xmm3 -; SSE-NEXT: pandn %xmm5, %xmm3 -; SSE-NEXT: por %xmm0, %xmm3 -; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 32(%r8), %xmm3 -; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm4, %xmm0 -; SSE-NEXT: pandn %xmm3, %xmm0 -; SSE-NEXT: movdqa 32(%rcx), %xmm3 -; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill -; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm3[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm4, %xmm7 -; SSE-NEXT: por %xmm0, %xmm7 -; SSE-NEXT: movdqa %xmm2, %xmm13 -; SSE-NEXT: pandn %xmm7, %xmm13 -; SSE-NEXT: movdqa 32(%rdx), %xmm15 -; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm15[1,1,2,2] -; SSE-NEXT: movdqa %xmm1, %xmm0 -; SSE-NEXT: pandn %xmm11, %xmm0 -; SSE-NEXT: movdqa 32(%rsi), %xmm11 -; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm11[3,3,3,3,4,5,6,7] -; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm1, %xmm5 -; SSE-NEXT: por %xmm0, %xmm5 -; SSE-NEXT: pand %xmm2, %xmm5 -; SSE-NEXT: por %xmm13, %xmm5 -; SSE-NEXT: pand %xmm12, %xmm5 -; SSE-NEXT: movdqa 32(%rdi), %xmm9 -; SSE-NEXT: movdqa %xmm12, %xmm0 -; SSE-NEXT: pandn %xmm9, %xmm0 -; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: por %xmm5, %xmm0 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 48(%rcx), %xmm0 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm4, %xmm0 -; SSE-NEXT: movdqa 48(%r8), %xmm3 -; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pandn %xmm3, %xmm4 -; SSE-NEXT: por %xmm0, %xmm4 -; SSE-NEXT: movdqa %xmm2, %xmm0 -; SSE-NEXT: pandn %xmm4, %xmm0 -; SSE-NEXT: movdqa 48(%rsi), %xmm10 -; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm10[3,3,3,3,4,5,6,7] -; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] -; SSE-NEXT: pand %xmm1, %xmm4 -; SSE-NEXT: movdqa 48(%rdx), %xmm8 -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,2,2] -; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pandn %xmm5, %xmm1 -; SSE-NEXT: por %xmm4, %xmm1 -; SSE-NEXT: pand %xmm2, %xmm1 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: pand 
%xmm12, %xmm1 -; SSE-NEXT: movdqa 48(%rdi), %xmm7 +; SSE-NEXT: pand %xmm6, %xmm0 +; SSE-NEXT: movdqa %xmm6, %xmm2 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm0, %xmm2 +; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,65535,65535,0,65535] ; SSE-NEXT: movdqa %xmm12, %xmm0 -; SSE-NEXT: pandn %xmm7, %xmm0 -; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: pand %xmm5, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,1,0,1] +; SSE-NEXT: movdqa %xmm5, %xmm2 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm0, %xmm2 +; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,65535,65535] +; SSE-NEXT: movdqa %xmm7, %xmm2 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: movdqa %xmm8, %xmm0 +; SSE-NEXT: pand %xmm7, %xmm0 +; SSE-NEXT: por %xmm0, %xmm2 +; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill +; SSE-NEXT: movdqa %xmm10, %xmm0 +; SSE-NEXT: pand %xmm3, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,3,2,3] +; SSE-NEXT: movdqa %xmm3, %xmm2 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm0, %xmm2 +; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa %xmm6, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm0 +; SSE-NEXT: movdqa 16(%rcx), %xmm8 +; SSE-NEXT: movdqa %xmm8, %xmm1 +; SSE-NEXT: pand %xmm6, %xmm1 ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: pandn %xmm0, %xmm4 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload -; SSE-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm4, %xmm0 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload -; SSE-NEXT: movdqa %xmm3, %xmm4 -; SSE-NEXT: psrld $16, %xmm4 -; SSE-NEXT: movdqa %xmm12, %xmm1 -; SSE-NEXT: pandn %xmm4, %xmm1 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload -; SSE-NEXT: # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm2, %xmm5 -; SSE-NEXT: pandn %xmm0, %xmm5 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload -; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload -; SSE-NEXT: # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm5, %xmm0 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; SSE-NEXT: psrld $16, %xmm5 -; 
SSE-NEXT: movdqa %xmm12, %xmm1 -; SSE-NEXT: pandn %xmm5, %xmm1 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm2, %xmm5 -; SSE-NEXT: pandn %xmm0, %xmm5 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7] -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm5, %xmm0 -; SSE-NEXT: movdqa (%rsp), %xmm5 # 16-byte Reload -; SSE-NEXT: psrld $16, %xmm5 -; SSE-NEXT: movdqa %xmm12, %xmm1 -; SSE-NEXT: pandn %xmm5, %xmm1 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm1 +; SSE-NEXT: movdqa %xmm4, %xmm1 +; SSE-NEXT: pand %xmm5, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,1,0,1] +; SSE-NEXT: movdqa %xmm5, %xmm14 +; SSE-NEXT: pandn %xmm2, %xmm14 +; SSE-NEXT: por %xmm1, %xmm14 +; SSE-NEXT: movdqa %xmm7, %xmm13 +; SSE-NEXT: pandn %xmm2, %xmm13 +; SSE-NEXT: movdqa %xmm11, %xmm1 +; SSE-NEXT: pand %xmm7, %xmm1 +; SSE-NEXT: por %xmm1, %xmm13 +; SSE-NEXT: movdqa 16(%rdx), %xmm15 +; SSE-NEXT: movdqa %xmm15, %xmm2 +; SSE-NEXT: pand %xmm3, %xmm2 +; SSE-NEXT: movdqa 16(%r8), %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3] ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm2, %xmm5 -; SSE-NEXT: pandn %xmm0, %xmm5 +; SSE-NEXT: movdqa %xmm3, %xmm12 +; SSE-NEXT: pandn %xmm0, %xmm12 +; SSE-NEXT: por %xmm2, %xmm12 +; SSE-NEXT: movdqa %xmm6, %xmm11 +; SSE-NEXT: pandn %xmm0, %xmm11 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm5, %xmm0 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; SSE-NEXT: psrld $16, %xmm5 -; SSE-NEXT: movdqa %xmm12, %xmm1 -; SSE-NEXT: pandn %xmm5, %xmm1 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm14, %xmm0 -; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1] -; SSE-NEXT: movdqa %xmm2, %xmm5 -; SSE-NEXT: pandn %xmm0, %xmm5 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[0,1,3,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm5, 
%xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm14[1,1,1,1] -; SSE-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm3[4],xmm14[5],xmm3[5],xmm14[6],xmm3[6],xmm14[7],xmm3[7] -; SSE-NEXT: movdqa %xmm3, %xmm5 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] -; SSE-NEXT: movdqa %xmm12, %xmm3 -; SSE-NEXT: pandn %xmm1, %xmm3 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm3 -; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,4,5,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm1 -; SSE-NEXT: pandn %xmm0, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: movdqa %xmm12, %xmm1 -; SSE-NEXT: pandn %xmm9, %xmm1 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,7,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm1 -; SSE-NEXT: pandn %xmm0, %xmm1 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,4,5,7,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: movdqa %xmm8, %xmm1 -; SSE-NEXT: psrlq $48, %xmm1 -; SSE-NEXT: movdqa %xmm12, %xmm3 +; SSE-NEXT: pand %xmm6, %xmm0 +; SSE-NEXT: por %xmm0, %xmm11 +; SSE-NEXT: pand %xmm5, %xmm10 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,1] +; SSE-NEXT: movdqa %xmm5, %xmm9 +; SSE-NEXT: pandn %xmm0, %xmm9 +; SSE-NEXT: por %xmm10, %xmm9 +; SSE-NEXT: movdqa %xmm7, %xmm10 +; SSE-NEXT: pandn %xmm0, %xmm10 +; SSE-NEXT: pand %xmm7, %xmm8 +; SSE-NEXT: por %xmm8, %xmm10 +; SSE-NEXT: movdqa (%rcx), %xmm2 +; SSE-NEXT: movdqa %xmm2, %xmm0 +; SSE-NEXT: pand %xmm3, %xmm0 +; SSE-NEXT: movdqa (%r8), %xmm8 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,3,2,3] ; SSE-NEXT: pandn %xmm1, %xmm3 -; SSE-NEXT: pand %xmm12, %xmm0 ; SSE-NEXT: por %xmm0, %xmm3 -; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; SSE-NEXT: movdqa %xmm1, %xmm0 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1] -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: pandn %xmm0, %xmm3 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,1,3,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm3, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm1[1,1,1,1] -; SSE-NEXT: movdqa %xmm5, %xmm3 -; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7] -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3] -; SSE-NEXT: pshufd 
{{.*#+}} xmm6 = xmm5[0,1,0,1] -; SSE-NEXT: movdqa %xmm12, %xmm11 -; SSE-NEXT: pandn %xmm6, %xmm11 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm11 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,5,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm6 -; SSE-NEXT: pandn %xmm0, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm6, %xmm0 -; SSE-NEXT: movdqa %xmm12, %xmm9 -; SSE-NEXT: pandn %xmm10, %xmm9 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm9 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,4,7,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: pandn %xmm0, %xmm4 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,7,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm4, %xmm0 -; SSE-NEXT: movdqa %xmm7, %xmm1 -; SSE-NEXT: psrlq $48, %xmm1 -; SSE-NEXT: movdqa %xmm12, %xmm10 -; SSE-NEXT: pandn %xmm1, %xmm10 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm10 -; SSE-NEXT: movdqa %xmm15, %xmm5 +; SSE-NEXT: pand %xmm6, %xmm4 +; SSE-NEXT: pandn %xmm1, %xmm6 +; SSE-NEXT: por %xmm4, %xmm6 ; SSE-NEXT: movdqa %xmm15, %xmm0 -; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1] -; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: pandn %xmm0, %xmm4 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[0,1,3,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm4, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[1,1,1,1] -; SSE-NEXT: movdqa %xmm3, %xmm4 -; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7] -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,1] -; SSE-NEXT: movdqa %xmm12, %xmm6 -; SSE-NEXT: pandn %xmm3, %xmm6 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm6 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,4,5,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: pandn %xmm0, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm3, %xmm0 -; SSE-NEXT: movdqa %xmm12, %xmm4 -; SSE-NEXT: pandn %xmm15, %xmm4 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm4 -; SSE-NEXT: pshufhw $172, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload -; SSE-NEXT: # xmm0 = mem[0,1,2,3,4,7,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: pandn %xmm0, %xmm3 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,5,7,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm3, %xmm0 -; SSE-NEXT: psrlq $48, %xmm1 -; SSE-NEXT: movdqa %xmm12, 
%xmm15 -; SSE-NEXT: pandn %xmm1, %xmm15 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm15 +; SSE-NEXT: pand %xmm5, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,1,0,1] +; SSE-NEXT: pandn %xmm1, %xmm5 +; SSE-NEXT: por %xmm0, %xmm5 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: pand %xmm7, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm7 +; SSE-NEXT: por %xmm0, %xmm7 +; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,65535,65535] +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload +; SSE-NEXT: pand %xmm0, %xmm15 +; SSE-NEXT: por %xmm1, %xmm15 +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload +; SSE-NEXT: pand %xmm0, %xmm2 +; SSE-NEXT: por %xmm1, %xmm2 +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload +; SSE-NEXT: pand %xmm0, %xmm4 +; SSE-NEXT: por %xmm1, %xmm4 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; SSE-NEXT: movdqa %xmm1, %xmm0 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1] -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: pandn %xmm0, %xmm3 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[0,1,3,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,1] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm3, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,1,1] -; SSE-NEXT: movdqa %xmm14, %xmm3 -; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload -; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3] -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,1] -; SSE-NEXT: movdqa %xmm12, %xmm14 -; SSE-NEXT: pandn %xmm7, %xmm14 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm14 -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,5,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: movdqa %xmm2, %xmm7 -; SSE-NEXT: pandn %xmm0, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7] -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: movdqa %xmm12, %xmm3 -; SSE-NEXT: pandn %xmm13, %xmm3 -; SSE-NEXT: pand %xmm12, %xmm0 -; SSE-NEXT: por %xmm0, %xmm3 -; SSE-NEXT: pshufhw $172, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload -; SSE-NEXT: # xmm0 = mem[0,1,2,3,4,7,6,6] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,5,7,6] -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,3,3] -; SSE-NEXT: pand %xmm2, %xmm7 -; SSE-NEXT: pandn %xmm0, %xmm2 -; SSE-NEXT: por %xmm7, %xmm2 -; SSE-NEXT: pand %xmm12, %xmm2 -; SSE-NEXT: psrlq $48, %xmm8 -; SSE-NEXT: pandn %xmm8, %xmm12 -; SSE-NEXT: por %xmm2, %xmm12 -; SSE-NEXT: movdqa %xmm12, 304(%r9) -; SSE-NEXT: movdqa %xmm3, 256(%r9) -; SSE-NEXT: movdqa %xmm14, 
240(%r9) -; SSE-NEXT: movdqa %xmm15, 224(%r9) -; SSE-NEXT: movdqa %xmm4, 176(%r9) -; SSE-NEXT: movdqa %xmm6, 160(%r9) -; SSE-NEXT: movdqa %xmm10, 144(%r9) +; SSE-NEXT: pand %xmm0, %xmm1 +; SSE-NEXT: pandn %xmm8, %xmm0 +; SSE-NEXT: por %xmm1, %xmm0 +; SSE-NEXT: movdqa %xmm0, 32(%r9) +; SSE-NEXT: movdqa %xmm4, 112(%r9) +; SSE-NEXT: movdqa %xmm2, 192(%r9) +; SSE-NEXT: movdqa %xmm15, 272(%r9) +; SSE-NEXT: movdqa %xmm7, (%r9) +; SSE-NEXT: movdqa %xmm5, 16(%r9) +; SSE-NEXT: movdqa %xmm6, 48(%r9) +; SSE-NEXT: movdqa %xmm3, 64(%r9) +; SSE-NEXT: movdqa %xmm10, 80(%r9) ; SSE-NEXT: movdqa %xmm9, 96(%r9) -; SSE-NEXT: movdqa %xmm11, 80(%r9) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 64(%r9) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 16(%r9) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, (%r9) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 288(%r9) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 272(%r9) +; SSE-NEXT: movdqa %xmm11, 128(%r9) +; SSE-NEXT: movdqa %xmm12, 144(%r9) +; SSE-NEXT: movdqa %xmm13, 160(%r9) +; SSE-NEXT: movdqa %xmm14, 176(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 208(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 192(%r9) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 128(%r9) +; SSE-NEXT: movaps %xmm0, 224(%r9) +; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 240(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 112(%r9) +; SSE-NEXT: movaps %xmm0, 256(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 48(%r9) +; SSE-NEXT: movaps %xmm0, 288(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 32(%r9) -; SSE-NEXT: addq $328, %rsp # imm = 0x148 +; SSE-NEXT: movaps %xmm0, 304(%r9) +; SSE-NEXT: addq $72, %rsp ; SSE-NEXT: retq ; ; AVX1-LABEL: vf32: ; AVX1: # %bb.0: -; AVX1-NEXT: subq $152, %rsp -; AVX1-NEXT: vmovdqa 32(%rdi), %xmm14 -; AVX1-NEXT: vmovdqa 48(%rdi), %xmm9 -; AVX1-NEXT: vmovdqa 32(%r8), %xmm8 -; AVX1-NEXT: vmovdqa 48(%r8), %xmm10 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,2,4,5,6,7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps {{.*#+}} ymm12 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535] -; AVX1-NEXT: vandnps %ymm0, %ymm12, %ymm2 -; AVX1-NEXT: vmovdqa 32(%rcx), %xmm0 -; AVX1-NEXT: vmovdqa 48(%rcx), %xmm11 -; AVX1-NEXT: vmovdqa 32(%rdx), %xmm3 -; AVX1-NEXT: vmovdqa 48(%rdx), %xmm13 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,3,3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = 
xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5 -; AVX1-NEXT: vandps %ymm5, %ymm12, %ymm5 -; AVX1-NEXT: vorps %ymm2, %ymm5, %ymm5 -; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2 -; AVX1-NEXT: vmovdqa 48(%rsi), %xmm1 -; AVX1-NEXT: vpsrlq $48, %xmm2, %xmm6 -; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm6[4],xmm5[5,6,7] -; AVX1-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm5 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[1,0,2,3,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,0,0] -; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm6[1],xmm5[2,3,4,5],xmm6[6],xmm5[7] -; AVX1-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpshuflw {{.*#+}} xmm5 = xmm0[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3],xmm5[4,5,6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,2,4,5,6,7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5 -; AVX1-NEXT: vandnps %ymm5, %ymm12, %ymm5 -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,2,2] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm2[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2,3,4,5],xmm6[6],xmm7[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6 -; AVX1-NEXT: vandps %ymm6, %ymm12, %ymm6 -; AVX1-NEXT: vorps %ymm5, %ymm6, %ymm5 -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6 -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm14[2,3,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0],xmm7[1],xmm6[2,3,4,5],xmm7[6],xmm6[7] -; AVX1-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm9[4],xmm6[5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6 -; AVX1-NEXT: vmovdqa %xmm11, (%rsp) # 16-byte Spill -; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm11[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm10[3],xmm7[4,5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,5,7] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm4, %ymm4 -; AVX1-NEXT: vandnps %ymm6, %ymm12, %ymm6 -; AVX1-NEXT: vandps %ymm4, %ymm12, %ymm4 -; AVX1-NEXT: vorps %ymm6, %ymm4, %ymm4 -; AVX1-NEXT: 
vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm13[0],zero,xmm13[1],zero -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5,6,7] -; AVX1-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4 -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm13[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3,4,5],xmm6[6],xmm4[7] -; AVX1-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[0,1,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm14[4],xmm5[5,6,7] +; AVX1-NEXT: vmovdqa (%rdx), %xmm13 +; AVX1-NEXT: vmovdqa 16(%rdx), %xmm14 +; AVX1-NEXT: vmovdqa 32(%rdx), %xmm11 +; AVX1-NEXT: vmovdqa (%r8), %xmm12 +; AVX1-NEXT: vmovdqa 16(%r8), %xmm5 +; AVX1-NEXT: vmovdqa 32(%r8), %xmm6 +; AVX1-NEXT: vmovdqa 48(%r8), %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm7[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm13[0],xmm0[1],xmm13[2,3,4,5],xmm0[6],xmm13[7] ; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm14[0],xmm2[0],xmm14[1],xmm2[1],xmm14[2],xmm2[2],xmm14[3],xmm2[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[0,1,3,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,1,1] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 -; AVX1-NEXT: vmovdqa (%rcx), %xmm4 -; AVX1-NEXT: vandnps %ymm0, %ymm12, %ymm0 -; AVX1-NEXT: vandps %ymm1, %ymm12, %ymm1 -; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm8[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7] -; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7] +; AVX1-NEXT: vmovdqa 16(%rcx), %xmm1 +; AVX1-NEXT: vmovdqa 32(%rcx), %xmm2 +; AVX1-NEXT: vmovdqa 48(%rcx), %xmm8 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm6[2,3,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm1[1,2,3,4],xmm4[5],xmm1[6,7] +; AVX1-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm8[0,1,2,3],xmm0[4],xmm8[5,6,7] ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm4[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vmovdqa (%r8), %xmm10 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm10[3],xmm0[4,5,6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 -; AVX1-NEXT: vmovdqa (%rsi), %xmm8 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm8[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4] 
-; AVX1-NEXT: vmovdqa (%rdx), %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3,4,5],xmm5[6],xmm2[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2 -; AVX1-NEXT: vandnps %ymm1, %ymm12, %ymm1 -; AVX1-NEXT: vandps %ymm2, %ymm12, %ymm2 -; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm14 -; AVX1-NEXT: vmovdqa (%rdi), %xmm2 -; AVX1-NEXT: vextractf128 $1, %ymm14, %xmm1 -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm5[1],xmm1[2,3,4,5],xmm5[6],xmm1[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm4[2],xmm2[3,4,5,6],xmm4[7] ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm5 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm5[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm13 -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm13[4],xmm1[5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm6, %ymm1 -; AVX1-NEXT: vmovdqa 16(%rcx), %xmm9 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm9[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vmovdqa 16(%r8), %xmm11 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm11[3],xmm0[4,5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm15 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm15[1,1,2,3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,7] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm7, %ymm0 -; AVX1-NEXT: vandnps %ymm1, %ymm12, %ymm1 -; AVX1-NEXT: vandps %ymm0, %ymm12, %ymm0 -; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm1 -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero -; AVX1-NEXT: vpblendw {{.*#+}} xmm15 = xmm0[0,1,2,3],xmm7[4],xmm0[5,6,7] -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1],xmm0[2,3,4,5],xmm7[6],xmm0[7] -; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,2,4,5,6,7] -; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm0 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,3,3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6 -; AVX1-NEXT: vandnps %ymm0, 
%ymm12, %ymm0 -; AVX1-NEXT: vandps %ymm6, %ymm12, %ymm6 -; AVX1-NEXT: vorps %ymm0, %ymm6, %ymm0 -; AVX1-NEXT: vpsrlq $48, %xmm8, %xmm6 -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm0[0,1,2,3],xmm6[4],xmm0[5,6,7] -; AVX1-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[1,0,2,3,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,0,0] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2,3,4,5],xmm6[6],xmm0[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm5[2,3,2,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm5[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1],xmm2[2,3,4,5],xmm0[6],xmm2[7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpblendw {{.*#+}} xmm10 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm14[0,1],xmm4[2],xmm14[3,4,5,6],xmm4[7] ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm2[0,1,3,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,1,1] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 -; AVX1-NEXT: vandnps %ymm3, %ymm12, %ymm3 -; AVX1-NEXT: vandps %ymm2, %ymm12, %ymm2 -; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm10[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6,7] -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3,4,5],xmm4[6],xmm3[7] -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,7,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,1,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload -; AVX1-NEXT: # xmm6 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,6,7,4,5,6,7,8,9,4,5,10,11,6,7] -; AVX1-NEXT: vpshufb %xmm7, %xmm6, %xmm8 -; AVX1-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm8, %ymm6 -; AVX1-NEXT: vandnps %ymm4, %ymm12, %ymm4 -; AVX1-NEXT: vandps %ymm6, %ymm12, %ymm6 -; AVX1-NEXT: vorps %ymm4, %ymm6, %ymm4 -; 
AVX1-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload -; AVX1-NEXT: vpsrld $16, %xmm0, %xmm6 -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5,6,7] -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4 -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1],xmm4[2,3,4,5],xmm0[6],xmm4[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7] -; AVX1-NEXT: vpshufb %xmm7, %xmm4, %xmm5 -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,6] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4 -; AVX1-NEXT: vandnps %ymm1, %ymm12, %ymm1 -; AVX1-NEXT: vandps %ymm4, %ymm12, %ymm4 -; AVX1-NEXT: vorps %ymm1, %ymm4, %ymm1 -; AVX1-NEXT: vpsrld $16, %xmm9, %xmm4 -; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm1[0,1,2,3],xmm4[4],xmm1[5,6,7] -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm9[0,1,2,3,7,6,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3,4,5],xmm5[6],xmm1[7] -; AVX1-NEXT: vmovdqa %xmm1, 144(%r9) -; AVX1-NEXT: vmovdqa %xmm4, 128(%r9) -; AVX1-NEXT: vmovdqa %xmm0, 304(%r9) -; AVX1-NEXT: vmovdqa %xmm6, 288(%r9) -; AVX1-NEXT: vmovdqa %xmm3, 16(%r9) -; AVX1-NEXT: vmovdqa %xmm2, (%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 80(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 64(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 112(%r9) -; AVX1-NEXT: vmovdqa %xmm15, 96(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 48(%r9) -; AVX1-NEXT: vmovdqa %xmm14, 32(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 176(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 160(%r9) +; AVX1-NEXT: vpblendw {{.*#+}} xmm15 = xmm4[0],xmm13[1,2,3,4],xmm4[5],xmm13[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm8[0,1,2],xmm5[3],xmm8[4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm12[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm5[4],xmm13[5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm14[0],xmm5[1],xmm14[2,3,4,5],xmm5[6],xmm14[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2],xmm7[3],xmm14[4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3] +; AVX1-NEXT: vmovdqa 48(%rdx), %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm9 = xmm0[0,1],xmm7[2],xmm0[3,4,5,6],xmm7[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm7[0],xmm11[1,2,3,4],xmm7[5],xmm11[6,7] +; AVX1-NEXT: vmovdqa (%rcx), %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1,2],xmm6[3],xmm2[4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm12[2,3,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = 
xmm2[0,1],xmm1[2],xmm2[3,4,5,6],xmm1[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4],xmm1[5],xmm0[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3,4,5],xmm3[6],xmm0[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm11[0,1,2,3],xmm3[4],xmm11[5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm11[0,1,2],xmm12[3],xmm11[4,5,6,7] +; AVX1-NEXT: vmovdqa %xmm7, 32(%r9) +; AVX1-NEXT: vmovdqa %xmm4, 112(%r9) +; AVX1-NEXT: vmovdqa %xmm6, 192(%r9) +; AVX1-NEXT: vmovdqa %xmm14, 272(%r9) +; AVX1-NEXT: vmovdqa %xmm13, (%r9) +; AVX1-NEXT: vmovdqa %xmm5, 16(%r9) +; AVX1-NEXT: vmovdqa %xmm1, 48(%r9) +; AVX1-NEXT: vmovdqa %xmm2, 64(%r9) +; AVX1-NEXT: vmovdqa %xmm10, 80(%r9) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm1, 96(%r9) +; AVX1-NEXT: vmovdqa %xmm15, 128(%r9) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm1, 144(%r9) +; AVX1-NEXT: vmovdqa %xmm3, 160(%r9) +; AVX1-NEXT: vmovdqa %xmm0, 176(%r9) ; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 272(%r9) +; AVX1-NEXT: vmovaps %xmm0, 224(%r9) ; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 256(%r9) +; AVX1-NEXT: vmovaps %xmm0, 240(%r9) ; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-NEXT: vmovaps %xmm0, 208(%r9) +; AVX1-NEXT: vmovdqa %xmm8, 288(%r9) +; AVX1-NEXT: vmovdqa %xmm9, 304(%r9) ; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 192(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 240(%r9) -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 224(%r9) -; AVX1-NEXT: addq $152, %rsp -; AVX1-NEXT: vzeroupper +; AVX1-NEXT: vmovaps %xmm0, 256(%r9) ; AVX1-NEXT: retq ; ; AVX2-SLOW-LABEL: vf32: ; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: subq $72, %rsp -; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm12 -; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %ymm2 -; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %ymm14 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm11 -; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %xmm9 -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm10 -; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %xmm4 -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm0, %xmm0 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm13 -; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %xmm7 -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm3 -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,6] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm3[0,1,0,1] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255> -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm5, %ymm0, %ymm0 -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm15 = <6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13> -; AVX2-SLOW-NEXT: vpshufb %xmm15, %xmm7, %xmm5 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm9[1,2,2,2] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = 
xmm5[0],xmm6[1],xmm5[2],xmm6[3],xmm5[4,5],xmm6[6],xmm5[7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = mem[2,1,2,3] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[3,3,3,3,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4],xmm5[5,6],xmm1[7] -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm5 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm6, %ymm1, %ymm0 -; AVX2-SLOW-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %ymm6 -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3] -; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %ymm9 -; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm4, %xmm4 -; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm0 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm0, %ymm4, %ymm0 -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpshufb %xmm15, %xmm13, %xmm0 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm11[1,2,2,2] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = mem[2,1,2,3] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm10[3,3,3,3,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3],xmm7[4],xmm4[5,6],xmm7[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm0, %ymm4, %ymm0 -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm12[2,3,2,3,6,7,6,7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm9[2,3,2,3,6,7,6,7] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4,5],ymm0[6],ymm4[7,8],ymm0[9],ymm4[10],ymm0[11],ymm4[12,13],ymm0[14],ymm4[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm14[3,2,3,3,7,6,7,7] -; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm7 = ymm2[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[0,2,3,2,4,6,7,6] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3],ymm7[4],ymm4[5,6],ymm7[7],ymm4[8,9],ymm7[10],ymm4[11],ymm7[12],ymm4[13,14],ymm7[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm0, %ymm4, %ymm0 -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm0 = -; AVX2-SLOW-NEXT: vpshufb %ymm0, %ymm6, %ymm4 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = ymm9[0,1,0,1,4,5,4,5] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm7[1],ymm4[2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7,8],ymm7[9],ymm4[10],ymm7[11],ymm4[12,13],ymm7[14],ymm4[15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = ymm12[0,1,2,1,4,5,6,5] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm13 = 
ymm2[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm7[2],ymm13[3],ymm7[4],ymm13[5,6],ymm7[7],ymm13[8,9],ymm7[10],ymm13[11],ymm7[12],ymm13[13,14],ymm7[15] -; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm7 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm4, %ymm13, %ymm4 -; AVX2-SLOW-NEXT: vpshufb %ymm0, %ymm5, %ymm0 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm13 = ymm7[0,1,0,1,4,5,4,5] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm15 = ymm0[0],ymm13[1],ymm0[2],ymm13[3],ymm0[4,5],ymm13[6],ymm0[7,8],ymm13[9],ymm0[10],ymm13[11],ymm0[12,13],ymm13[14],ymm0[15] -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm13 -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm13[3,1,2,2,4,5,6,7,11,9,10,10,12,13,14,15] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12] -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm1 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm8 = ymm1[0,1,2,1,4,5,6,5] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm8[2],ymm0[3],ymm8[4],ymm0[5,6],ymm8[7],ymm0[8,9],ymm8[10],ymm0[11],ymm8[12],ymm0[13,14],ymm8[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm15[2,3,2,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm8, %ymm0, %ymm15 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm1[2,3,2,3,6,7,6,7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm8 = ymm7[2,3,2,3,6,7,6,7] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm8[0],ymm0[1],ymm8[2],ymm0[3],ymm8[4,5],ymm0[6],ymm8[7,8],ymm0[9],ymm8[10],ymm0[11],ymm8[12,13],ymm0[14],ymm8[15] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm8 = ymm13[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,2,3,2,4,6,7,6] ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm0 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm10 = ymm0[3,2,3,3,7,6,7,7] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm8 = ymm10[0,1],ymm8[2],ymm10[3],ymm8[4],ymm10[5,6],ymm8[7],ymm10[8,9],ymm8[10],ymm10[11],ymm8[12],ymm10[13,14],ymm8[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm2, %ymm8, %ymm2 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = -; AVX2-SLOW-NEXT: vpshufb %ymm8, %ymm6, %ymm10 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm11 = ymm14[3,0,3,0,7,4,7,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2],ymm10[3],ymm11[4,5],ymm10[6],ymm11[7,8],ymm10[9],ymm11[10],ymm10[11],ymm11[12,13],ymm10[14],ymm11[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm12[1,1,2,2] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,1,2,2] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm9 = ymm11[0],ymm9[1,2],ymm11[3],ymm9[4],ymm11[5],ymm9[6,7],ymm11[8],ymm9[9,10],ymm11[11],ymm9[12],ymm11[13],ymm9[14,15] -; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm10, %ymm9, %ymm9 -; AVX2-SLOW-NEXT: vpshufb %ymm8, %ymm5, %ymm8 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm10 = ymm0[3,0,3,0,7,4,7,4] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm8 = ymm10[0],ymm8[1],ymm10[2],ymm8[3],ymm10[4,5],ymm8[6],ymm10[7,8],ymm8[9],ymm10[10],ymm8[11],ymm10[12,13],ymm8[14],ymm10[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,1,2,2] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[1,1,2,2] -; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm7[1,2],ymm1[3],ymm7[4],ymm1[5],ymm7[6,7],ymm1[8],ymm7[9,10],ymm1[11],ymm7[12],ymm1[13],ymm7[14,15] -; AVX2-SLOW-NEXT: vpblendvb 
%ymm3, %ymm8, %ymm1, %ymm1 -; AVX2-SLOW-NEXT: vpbroadcastq (%r8), %ymm3 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: vpbroadcastq 40(%rdi), %ymm8 -; AVX2-SLOW-NEXT: vpblendvb %ymm7, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: vpbroadcastq 32(%r8), %ymm10 -; AVX2-SLOW-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: vpbroadcastq 8(%rdi), %ymm11 -; AVX2-SLOW-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm6 = ymm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm12 = ymm14[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm4, %ymm12, %ymm4 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm15, %ymm0, %ymm0 -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm5 = ymm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm2, %ymm5, %ymm2 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31] -; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload -; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm12, %ymm12 -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm9, %ymm12, %ymm9 -; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm13, %ymm5 -; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm1, %ymm5, %ymm1 -; AVX2-SLOW-NEXT: vmovdqa %ymm1, 64(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm9, 224(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm2, 128(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm0, 96(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm4, 256(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm6, 288(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm11, 32(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm10, 160(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm8, 192(%r9) -; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%r9) -; AVX2-SLOW-NEXT: addq $72, %rsp +; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %ymm1 +; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm2 +; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %ymm3 +; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm4 +; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %ymm5 +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm5[0,1,1,1] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm2, %ymm6, %ymm6 +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[1,1,2,2] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm3, %ymm5, %ymm5 +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm4[1,1,2,2] +; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm2, %ymm9, %ymm2 +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = 
ymm4[0,1,1,1] +; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm1, %ymm4, %ymm4 +; AVX2-SLOW-NEXT: vpbroadcastq (%r8), %ymm7 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm0, %ymm7, %ymm7 +; AVX2-SLOW-NEXT: vpbroadcastq 48(%r8), %ymm9 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm0, %ymm9, %ymm9 +; AVX2-SLOW-NEXT: vpbroadcastq 56(%r8), %ymm11 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm12 = [0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0] +; AVX2-SLOW-NEXT: vpblendvb %ymm12, %ymm1, %ymm11, %ymm11 +; AVX2-SLOW-NEXT: vpbroadcastq 32(%r8), %ymm13 +; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm1, %ymm13, %ymm1 +; AVX2-SLOW-NEXT: vpbroadcastq 24(%r8), %ymm8 +; AVX2-SLOW-NEXT: vpblendvb %ymm12, %ymm0, %ymm8, %ymm0 +; AVX2-SLOW-NEXT: vpbroadcastq 16(%r8), %ymm8 +; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm3, %ymm8, %ymm3 +; AVX2-SLOW-NEXT: vmovdqa %ymm3, 96(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm0, 128(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm1, 160(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm11, 288(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm9, 256(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm7, (%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm4, 32(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm2, 64(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm5, 224(%r9) +; AVX2-SLOW-NEXT: vmovdqa %ymm6, 192(%r9) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; -; AVX2-FAST-ALL-LABEL: vf32: -; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: subq $72, %rsp -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rdi), %ymm15 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %xmm9 -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rsi), %xmm0 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %xmm1 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %xmm11 -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rdx), %xmm5 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %xmm10 -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rcx), %xmm1 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} xmm12 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7] -; AVX2-FAST-ALL-NEXT: vpshufb %xmm12, %xmm2, %xmm2 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm8 = <255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255> -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm3, %ymm4, %ymm2 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} xmm13 = <6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13> -; AVX2-FAST-ALL-NEXT: vpshufb %xmm13, %xmm0, %xmm3 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm4 = xmm5[1,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm4 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} xmm7 = -; AVX2-FAST-ALL-NEXT: vpshufb %xmm7, %xmm1, %xmm3 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm6 = mem[2,1,2,3] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm3[2],xmm6[3],xmm3[4],xmm6[5,6],xmm3[7] -; 
AVX2-FAST-ALL-NEXT: vmovdqa 32(%rsi), %ymm14 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm4, %ymm6, %ymm2 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rdi), %xmm4 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rdx), %ymm4 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13] -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rcx), %ymm6 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3] -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%r8), %ymm3 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpshufb %xmm12, %xmm1, %xmm1 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpshufb %xmm13, %xmm9, %xmm0 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm11[1,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpshufb %xmm7, %xmm10, %xmm1 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm5 = mem[2,1,2,3] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4],xmm5[5,6],xmm1[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm15[2,3,2,3,6,7,6,7] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[2,3,2,3,6,7,6,7] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm5 = -; AVX2-FAST-ALL-NEXT: vpshufb %ymm5, %ymm14, %ymm1 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm7 = ymm4[3,2,3,3,7,6,7,7] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm1 = ymm7[0,1],ymm1[2],ymm7[3],ymm1[4],ymm7[5,6],ymm1[7],ymm7[8,9],ymm1[10],ymm7[11],ymm1[12],ymm7[13,14],ymm1[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm0 = -; AVX2-FAST-ALL-NEXT: vpshufb %ymm0, %ymm6, %ymm1 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm7 = ymm3[0,1,0,1,4,5,4,5] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2],ymm7[3],ymm1[4,5],ymm7[6],ymm1[7,8],ymm7[9],ymm1[10],ymm7[11],ymm1[12,13],ymm7[14],ymm1[15] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm7 = -; AVX2-FAST-ALL-NEXT: vpshufb %ymm7, %ymm14, %ymm11 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm12 = ymm15[0,1,2,1,4,5,6,5] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3],ymm12[4],ymm11[5,6],ymm12[7],ymm11[8,9],ymm12[10],ymm11[11],ymm12[12],ymm11[13,14],ymm12[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm1, %ymm11, %ymm1 -; 
AVX2-FAST-ALL-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %ymm11 -; AVX2-FAST-ALL-NEXT: vpshufb %ymm0, %ymm11, %ymm1 -; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %ymm2 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm13 = ymm2[0,1,0,1,4,5,4,5] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm13[1],ymm1[2],ymm13[3],ymm1[4,5],ymm13[6],ymm1[7,8],ymm13[9],ymm1[10],ymm13[11],ymm1[12,13],ymm13[14],ymm1[15] -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %ymm13 -; AVX2-FAST-ALL-NEXT: vpshufb %ymm7, %ymm13, %ymm7 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %ymm0 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,1,2,1,4,5,6,5] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm9[2],ymm7[3],ymm9[4],ymm7[5,6],ymm9[7],ymm7[8,9],ymm9[10],ymm7[11],ymm9[12],ymm7[13,14],ymm9[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm1, %ymm7, %ymm1 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm7 = ymm0[2,3,2,3,6,7,6,7] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm9 = ymm2[2,3,2,3,6,7,6,7] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm7 = ymm9[0],ymm7[1],ymm9[2],ymm7[3],ymm9[4,5],ymm7[6],ymm9[7,8],ymm7[9],ymm9[10],ymm7[11],ymm9[12,13],ymm7[14],ymm9[15] -; AVX2-FAST-ALL-NEXT: vpshufb %ymm5, %ymm13, %ymm5 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %ymm9 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm10 = ymm9[3,2,3,3,7,6,7,7] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm5 = ymm10[0,1],ymm5[2],ymm10[3],ymm5[4],ymm10[5,6],ymm5[7],ymm10[8,9],ymm5[10],ymm10[11],ymm5[12],ymm10[13,14],ymm5[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm7, %ymm5, %ymm5 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm7 = -; AVX2-FAST-ALL-NEXT: vpshufb %ymm7, %ymm6, %ymm10 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm12 = ymm4[3,0,3,0,7,4,7,4] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7,8],ymm10[9],ymm12[10],ymm10[11],ymm12[12,13],ymm10[14],ymm12[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm12 = ymm15[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm3 = ymm12[0],ymm3[1,2],ymm12[3],ymm3[4],ymm12[5],ymm3[6,7],ymm12[8],ymm3[9,10],ymm12[11],ymm3[12],ymm12[13],ymm3[14,15] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm10, %ymm3, %ymm3 -; AVX2-FAST-ALL-NEXT: vpshufb %ymm7, %ymm11, %ymm7 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm10 = ymm9[3,0,3,0,7,4,7,4] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm7 = ymm10[0],ymm7[1],ymm10[2],ymm7[3],ymm10[4,5],ymm7[6],ymm10[7,8],ymm7[9],ymm10[10],ymm7[11],ymm10[12,13],ymm7[14],ymm10[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2],ymm0[3],ymm2[4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10],ymm0[11],ymm2[12],ymm0[13],ymm2[14,15] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm8, %ymm7, %ymm0, %ymm0 -; AVX2-FAST-ALL-NEXT: vpbroadcastq (%r8), %ymm2 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload -; AVX2-FAST-ALL-NEXT: vpbroadcastq 40(%rdi), %ymm8 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, 
(%rsp), %ymm8, %ymm8 # 32-byte Folded Reload -; AVX2-FAST-ALL-NEXT: vpbroadcastq 32(%r8), %ymm10 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload -; AVX2-FAST-ALL-NEXT: vpbroadcastq 8(%rdi), %ymm12 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm6 = ymm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload -; AVX2-FAST-ALL-NEXT: vpbroadcastq {{.*#+}} ymm15 = [25769803781,25769803781,25769803781,25769803781] -; AVX2-FAST-ALL-NEXT: vpermd %ymm4, %ymm15, %ymm4 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload -; AVX2-FAST-ALL-NEXT: vpermd %ymm9, %ymm15, %ymm9 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm1, %ymm9, %ymm1 -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm9 = ymm11[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm5, %ymm9, %ymm5 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31] -; AVX2-FAST-ALL-NEXT: vpshufb %ymm9, %ymm14, %ymm11 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm3, %ymm11, %ymm3 -; AVX2-FAST-ALL-NEXT: vpshufb %ymm9, %ymm13, %ymm9 -; AVX2-FAST-ALL-NEXT: vpblendvb %ymm7, %ymm0, %ymm9, %ymm0 -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm0, 64(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm3, 224(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm5, 128(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm1, 96(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm4, 256(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm6, 288(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm12, 32(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm10, 160(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm8, 192(%r9) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm2, (%r9) -; AVX2-FAST-ALL-NEXT: addq $72, %rsp -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: vf32: -; AVX2-FAST-PERLANE: # %bb.0: -; AVX2-FAST-PERLANE-NEXT: subq $72, %rsp -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm15 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm9 -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %xmm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13] -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm8, %xmm1, %xmm1 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm12 -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %xmm5 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm11 -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %xmm2 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7] -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm13, %xmm3, %xmm3 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm3[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm3 = 
<255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255,255,255,0,0,0,0,u,u,255,255> -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm1, %ymm4, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm14 = <6,7,u,u,10,11,u,u,8,9,8,9,u,u,12,13> -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm14, %xmm0, %xmm4 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[1,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm1 = -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm1, %xmm2, %xmm7 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = mem[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4],xmm6[5,6],xmm7[7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %ymm10 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm4, %ymm6, %ymm4 -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm4, (%rsp) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %xmm4 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %ymm7 -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm8, %xmm0, %xmm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %ymm6 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %ymm8 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm13, %xmm2, %xmm2 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm14, %xmm9, %xmm0 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm12[1,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm1, %xmm11, %xmm1 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = mem[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm15[2,3,2,3,6,7,6,7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm8[2,3,2,3,6,7,6,7] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm5 = -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm5, %ymm10, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm2 = ymm7[3,2,3,3,7,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm0, %ymm1, %ymm0 -; 
AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm0 = -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm0, %ymm6, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm2 = ymm8[0,1,0,1,4,5,4,5] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm4 = -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm10, %ymm11 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm12 = ymm15[0,1,2,1,4,5,6,5] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3],ymm12[4],ymm11[5,6],ymm12[7],ymm11[8,9],ymm12[10],ymm11[11],ymm12[12],ymm11[13,14],ymm12[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm1, %ymm11, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm11 -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm0, %ymm11, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm2 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm13 = ymm2[0,1,0,1,4,5,4,5] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm13[1],ymm0[2],ymm13[3],ymm0[4,5],ymm13[6],ymm0[7,8],ymm13[9],ymm0[10],ymm13[11],ymm0[12,13],ymm13[14],ymm0[15] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm13 -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm13, %ymm4 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm0[0,1,2,1,4,5,6,5] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm14[2],ymm4[3],ymm14[4],ymm4[5,6],ymm14[7],ymm4[8,9],ymm14[10],ymm4[11],ymm14[12],ymm4[13,14],ymm14[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm1, %ymm4, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm4 = ymm0[2,3,2,3,6,7,6,7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm2[2,3,2,3,6,7,6,7] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm4 = ymm14[0],ymm4[1],ymm14[2],ymm4[3],ymm14[4,5],ymm4[6],ymm14[7,8],ymm4[9],ymm14[10],ymm4[11],ymm14[12,13],ymm4[14],ymm14[15] -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm5, %ymm13, %ymm5 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm14 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = ymm14[3,2,3,3,7,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm5 = ymm9[0,1],ymm5[2],ymm9[3],ymm5[4],ymm9[5,6],ymm5[7],ymm9[8,9],ymm5[10],ymm9[11],ymm5[12],ymm9[13,14],ymm5[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm4, %ymm5, %ymm4 -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm5 = -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm5, %ymm6, %ymm9 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm12 = ymm7[3,0,3,0,7,4,7,4] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm9 = ymm12[0],ymm9[1],ymm12[2],ymm9[3],ymm12[4,5],ymm9[6],ymm12[7,8],ymm9[9],ymm12[10],ymm9[11],ymm12[12,13],ymm9[14],ymm12[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm12 = ymm15[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm8 = 
ymm12[0],ymm8[1,2],ymm12[3],ymm8[4],ymm12[5],ymm8[6,7],ymm12[8],ymm8[9,10],ymm12[11],ymm8[12],ymm12[13],ymm8[14,15] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm9, %ymm8, %ymm8 -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm5, %ymm11, %ymm5 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = ymm14[3,0,3,0,7,4,7,4] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm5 = ymm9[0],ymm5[1],ymm9[2],ymm5[3],ymm9[4,5],ymm5[6],ymm9[7,8],ymm5[9],ymm9[10],ymm5[11],ymm9[12,13],ymm5[14],ymm9[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2],ymm0[3],ymm2[4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10],ymm0[11],ymm2[12],ymm0[13],ymm2[14,15] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm5, %ymm0, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq (%r8), %ymm2 -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq 40(%rdi), %ymm5 -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, (%rsp), %ymm5, %ymm5 # 32-byte Folded Reload -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq 32(%r8), %ymm9 -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq 8(%rdi), %ymm12 -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm6 = ymm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm14 = ymm14[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm1, %ymm14, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm11 = ymm11[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm4, %ymm11, %ymm4 -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,0,1,0,1,0,1,14,15,2,3,2,3,14,15,16,17,16,17,16,17,16,17,30,31,18,19,18,19,30,31] -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm11, %ymm10, %ymm10 -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm8, %ymm10, %ymm8 -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm11, %ymm13, %ymm10 -; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm3, %ymm0, %ymm10, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 64(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, 224(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm4, 128(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 96(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm7, 256(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, 288(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm12, 32(%r9) -; AVX2-FAST-PERLANE-NEXT: 
vmovdqa %ymm9, 160(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, 192(%r9) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, (%r9) -; AVX2-FAST-PERLANE-NEXT: addq $72, %rsp -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-FAST-LABEL: vf32: +; AVX2-FAST: # %bb.0: +; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm0 +; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %ymm1 +; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm2 +; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %ymm3 +; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm4 +; AVX2-FAST-NEXT: vmovdqa 32(%r8), %ymm5 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm5[0,1,1,1] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm2, %ymm6, %ymm6 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[1,1,2,2] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm3, %ymm5, %ymm5 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm4[1,1,2,2] +; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm2, %ymm9, %ymm2 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,1,1] +; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm1, %ymm4, %ymm4 +; AVX2-FAST-NEXT: vpbroadcastq (%r8), %ymm7 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm0, %ymm7, %ymm7 +; AVX2-FAST-NEXT: vpbroadcastq 48(%r8), %ymm9 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm10, %ymm0, %ymm9, %ymm9 +; AVX2-FAST-NEXT: vpbroadcastq 56(%r8), %ymm11 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm12 = [0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,0,0] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm1, %ymm11, %ymm11 +; AVX2-FAST-NEXT: vpbroadcastq 32(%r8), %ymm13 +; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm1, %ymm13, %ymm1 +; AVX2-FAST-NEXT: vpbroadcastq 24(%r8), %ymm8 +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm0, %ymm8, %ymm0 +; AVX2-FAST-NEXT: vpbroadcastq 16(%r8), %ymm8 +; AVX2-FAST-NEXT: vpblendvb %ymm10, %ymm3, %ymm8, %ymm3 +; AVX2-FAST-NEXT: vmovdqa %ymm3, 96(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm0, 128(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm1, 160(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm11, 288(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm9, 256(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm7, (%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm4, 32(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm2, 64(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm5, 224(%r9) +; AVX2-FAST-NEXT: vmovdqa %ymm6, 192(%r9) +; AVX2-FAST-NEXT: vzeroupper +; AVX2-FAST-NEXT: retq ; ; AVX512-LABEL: vf32: ; AVX512: # %bb.0: -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm1 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2 -; AVX512-NEXT: vmovdqu64 (%rcx), %zmm3 -; AVX512-NEXT: vmovdqu64 (%r8), %zmm4 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = -; AVX512-NEXT: vpermi2w %zmm3, %zmm2, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,32,u,u,u,1,33,u,u,u,2,34,u,u,u,3,35,u,u,u,4,36,u,u,u,5,37,u,u,u,6,38> -; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm6 -; AVX512-NEXT: movl $415641996, %eax # imm = 0x18C6318C -; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: vmovdqu16 %zmm5, %zmm6 {%k1} -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = 
[0,1,2,3,32,5,6,7,8,33,10,11,12,13,34,15,16,17,18,35,20,21,22,23,36,25,26,27,28,37,30,31] -; AVX512-NEXT: vpermi2w %zmm4, %zmm6, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = -; AVX512-NEXT: vpermi2w %zmm0, %zmm4, %zmm6 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = <6,38,u,u,u,7,39,u,u,u,8,40,u,u,u,9,41,u,u,u,10,42,u,u,u,11,43,u,u,u,12,44> -; AVX512-NEXT: vpermi2w %zmm3, %zmm2, %zmm7 -; AVX512-NEXT: vmovdqu16 %zmm6, %zmm7 {%k1} -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,39,5,6,7,8,40,10,11,12,13,41,15,16,17,18,42,20,21,22,23,43,25,26,27,28,44,30,31] -; AVX512-NEXT: vpermi2w %zmm1, %zmm7, %zmm6 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm7 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = <12,45,u,u,u,13,46,u,u,u,14,47,u,u,u,15,48,u,u,u,16,49,u,u,u,17,50,u,u,u,18,51> -; AVX512-NEXT: vpermi2w %zmm0, %zmm4, %zmm8 -; AVX512-NEXT: vmovdqu16 %zmm7, %zmm8 {%k1} -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,45,5,6,7,8,46,10,11,12,13,47,15,16,17,18,48,20,21,22,23,49,25,26,27,28,50,30,31] -; AVX512-NEXT: vpermi2w %zmm3, %zmm8, %zmm7 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = -; AVX512-NEXT: vpermi2w %zmm4, %zmm3, %zmm8 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = <19,51,u,u,u,20,52,u,u,u,21,53,u,u,u,22,54,u,u,u,23,55,u,u,u,24,56,u,u,u,25,57> -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm9 -; AVX512-NEXT: vmovdqu16 %zmm8, %zmm9 {%k1} -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,52,5,6,7,8,53,10,11,12,13,54,15,16,17,18,55,20,21,22,23,56,25,26,27,28,57,30,31] -; AVX512-NEXT: vpermi2w %zmm0, %zmm9, %zmm8 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = -; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm9 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = <25,57,u,u,u,26,58,u,u,u,27,59,u,u,u,28,60,u,u,u,29,61,u,u,u,30,62,u,u,u,31,63> -; AVX512-NEXT: vpermi2w %zmm4, %zmm3, %zmm0 -; AVX512-NEXT: vmovdqu16 %zmm9, %zmm0 {%k1} -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,58,5,6,7,8,59,10,11,12,13,60,15,16,17,18,61,20,21,22,23,62,25,26,27,28,63,30,31] +; AVX512-NEXT: vmovdqu64 (%rdx), %zmm0 +; AVX512-NEXT: vmovdqu64 (%rcx), %zmm1 +; AVX512-NEXT: vmovdqu64 (%r8), %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,32,5,6,7,8,33,10,11,12,13,34,15,16,17,18,35,20,21,22,23,36,25,26,27,28,37,30,31] +; AVX512-NEXT: vpermi2w %zmm2, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,38,3,4,5,6,39,8,9,10,11,40,13,14,15,16,41,18,19,20,21,42,23,24,25,26,43,28,29,30,31] +; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm4 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [44,1,2,3,4,45,6,7,8,9,46,11,12,13,14,47,16,17,18,19,48,21,22,23,24,49,26,27,28,29,50,31] +; AVX512-NEXT: vpermi2w %zmm2, %zmm0, %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,51,4,5,6,7,52,9,10,11,12,53,14,15,16,17,54,19,20,21,22,55,24,25,26,27,56,29,30,31] +; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm6 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,57,2,3,4,5,58,7,8,9,10,59,12,13,14,15,60,17,18,19,20,61,22,23,24,25,62,27,28,29,30,63] ; AVX512-NEXT: vpermi2w %zmm2, %zmm0, %zmm1 ; AVX512-NEXT: vmovdqu64 %zmm1, 256(%r9) -; AVX512-NEXT: vmovdqu64 %zmm8, 192(%r9) -; AVX512-NEXT: vmovdqu64 %zmm7, 128(%r9) -; AVX512-NEXT: vmovdqu64 %zmm6, 64(%r9) -; AVX512-NEXT: vmovdqu64 %zmm5, (%r9) +; AVX512-NEXT: vmovdqu64 %zmm6, 192(%r9) +; AVX512-NEXT: vmovdqu64 %zmm5, 128(%r9) +; AVX512-NEXT: vmovdqu64 %zmm4, 64(%r9) +; AVX512-NEXT: vmovdqu64 %zmm3, (%r9) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <32 x i16>, <32 x i16>* %in.vecptr0, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll 
b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll @@ -141,31 +141,29 @@ ; SSE-LABEL: vf4: ; SSE: # %bb.0: ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero -; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero -; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero -; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero -; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm3 = mem[0],zero -; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[1,3] -; SSE-NEXT: movdqa %xmm1, %xmm4 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[0,2] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1] +; SSE-NEXT: movaps %xmm0, %xmm4 +; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm1[0] ; SSE-NEXT: movdqa %xmm2, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm2[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,2] -; SSE-NEXT: movaps %xmm0, 32(%rax) -; SSE-NEXT: movaps %xmm5, 16(%rax) -; SSE-NEXT: movaps %xmm4, (%rax) +; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm3[0] +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[3,1,1,3] +; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[2,0,2,3,4,5,6,7] +; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,5,7] +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,2],xmm6[0,3] +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0] +; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,4,6,7] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,1],xmm1[1,3] +; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[0,3] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm5[0,2] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[0,1] +; SSE-NEXT: movaps %xmm2, 16(%rax) +; SSE-NEXT: movaps %xmm0, (%rax) +; SSE-NEXT: movaps %xmm4, 32(%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf4: @@ -344,229 +342,141 @@ ; SSE-LABEL: vf8: ; SSE: # %bb.0: ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movdqa (%rdi), %xmm3 -; SSE-NEXT: movdqa (%rsi), %xmm8 -; SSE-NEXT: movdqa (%rdx), %xmm0 -; SSE-NEXT: movdqa (%rcx), %xmm9 -; SSE-NEXT: movdqa (%r8), %xmm5 -; SSE-NEXT: movdqa (%r9), %xmm10 -; SSE-NEXT: movdqa %xmm3, %xmm1 -; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3] -; SSE-NEXT: movdqa %xmm5, %xmm2 -; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3] -; SSE-NEXT: movdqa %xmm2, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[1,3] -; SSE-NEXT: movdqa %xmm0, %xmm13 -; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3] -; SSE-NEXT: movdqa %xmm13, 
%xmm11 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm11 = xmm11[0],xmm1[0] -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm7[0,2] -; SSE-NEXT: movdqa %xmm13, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm2[3,3] -; SSE-NEXT: movaps %xmm1, %xmm12 -; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[3,1],xmm2[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm6[0,2] -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7] +; SSE-NEXT: movdqa (%r8), %xmm1 +; SSE-NEXT: movdqa (%r9), %xmm3 +; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,65535,0] +; SSE-NEXT: movdqa %xmm1, %xmm4 +; SSE-NEXT: pand %xmm0, %xmm4 +; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,2,2,3,4,5,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,1,2,1] +; SSE-NEXT: movdqa %xmm0, %xmm9 +; SSE-NEXT: pandn %xmm5, %xmm9 +; SSE-NEXT: por %xmm4, %xmm9 +; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,6,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3] +; SSE-NEXT: pandn %xmm5, %xmm0 +; SSE-NEXT: por %xmm4, %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm1, %xmm5 +; SSE-NEXT: pand %xmm4, %xmm5 ; SSE-NEXT: movdqa %xmm3, %xmm6 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm0[1] -; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7] -; SSE-NEXT: movdqa %xmm5, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,1],xmm0[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm6[0,2] -; SSE-NEXT: movaps %xmm0, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm5[3,3] -; SSE-NEXT: movdqa %xmm3, %xmm4 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm5[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm6[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm3[1,3] -; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm5[0,2] -; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm13[1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm13[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[0,2] -; SSE-NEXT: movaps %xmm2, 16(%rax) -; SSE-NEXT: movaps %xmm0, 48(%rax) -; SSE-NEXT: movaps %xmm4, 80(%rax) -; SSE-NEXT: movaps %xmm7, 64(%rax) -; SSE-NEXT: movaps %xmm12, 32(%rax) -; SSE-NEXT: movaps %xmm11, (%rax) +; SSE-NEXT: pslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm6[0,1,2,3,4,5] +; SSE-NEXT: movdqa %xmm4, %xmm7 +; SSE-NEXT: pandn %xmm6, %xmm7 +; SSE-NEXT: por %xmm5, %xmm7 +; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,65535,65535,65535,65535] +; SSE-NEXT: pand %xmm6, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,2,3,3] +; SSE-NEXT: movdqa %xmm6, %xmm2 +; SSE-NEXT: pandn %xmm8, %xmm2 +; SSE-NEXT: por %xmm1, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,0,1,1] +; SSE-NEXT: pslld $16, %xmm3 +; SSE-NEXT: pandn %xmm3, %xmm4 +; SSE-NEXT: por %xmm5, %xmm4 +; SSE-NEXT: pandn %xmm8, %xmm6 +; SSE-NEXT: por %xmm1, %xmm6 +; SSE-NEXT: movdqa %xmm6, 16(%rax) +; SSE-NEXT: movdqa %xmm4, 48(%rax) +; SSE-NEXT: movdqa %xmm2, 64(%rax) +; SSE-NEXT: movdqa %xmm7, (%rax) +; SSE-NEXT: movdqa %xmm0, 80(%rax) +; SSE-NEXT: movdqa %xmm9, 32(%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf8: ; AVX1: # %bb.0: ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovdqa (%rdi), %xmm9 -; AVX1-NEXT: vmovdqa (%rsi), %xmm1 -; AVX1-NEXT: vmovdqa (%rdx), %xmm8 -; AVX1-NEXT: vmovdqa (%rcx), %xmm11 -; AVX1-NEXT: 
vmovdqa (%r8), %xmm4 -; AVX1-NEXT: vmovdqa (%r9), %xmm5 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm7[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5],xmm2[6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm10 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm7[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5],xmm2[6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm2, %ymm10 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,1,0,1] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5],xmm5[6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm5[0,0,1,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm6[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[2,2,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5],xmm0[6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm5[2,2,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5],xmm4[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm5[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3],xmm4[4,5,6,7] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5],xmm3[6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vmovaps %ymm1, 64(%rax) -; AVX1-NEXT: vmovaps %ymm0, 32(%rax) -; AVX1-NEXT: vmovaps %ymm10, (%rax) +; AVX1-NEXT: vmovdqa (%rdx), %xmm0 +; AVX1-NEXT: vmovdqa (%rcx), %xmm1 +; AVX1-NEXT: vmovdqa (%r8), %xmm2 +; AVX1-NEXT: vmovdqa (%r9), %xmm3 +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm1[0,1],xmm4[2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm0[0,1,2,3],xmm6[4,5],xmm0[6,7] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm0[2,3,4,5],xmm4[6,7] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = 
xmm4[0,1],xmm1[2,3,4,5],xmm4[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5,6,7] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps %ymm0, 64(%rax) +; AVX1-NEXT: vmovaps %ymm3, 32(%rax) +; AVX1-NEXT: vmovaps %ymm5, (%rax) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-SLOW-LABEL: vf8: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm1 -; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm2 -; AVX2-SLOW-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29] -; AVX2-SLOW-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-SLOW-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm1[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31] +; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm0 +; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm1 +; AVX2-SLOW-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm0 +; AVX2-SLOW-NEXT: vinserti128 $1, (%r9), %ymm1, %ymm1 +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm1[0,2,0,2] +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u] +; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,1,3] +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u] +; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm0[1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = 
ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31] +; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 64(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm4, 32(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm3, 32(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm2, (%rax) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-ALL-LABEL: vf8: ; AVX2-FAST-ALL: # %bb.0: ; AVX2-FAST-ALL-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %xmm1 -; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %xmm2 -; AVX2-FAST-ALL-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1 -; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,4,1,5,0,4,1,5] -; AVX2-FAST-ALL-NEXT: # ymm3 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermd %ymm1, %ymm3, %ymm3 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,4,5,u,u,u,u,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u,24,25,28,29] -; AVX2-FAST-ALL-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2 -; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,4,4,0,0,4,4,0] -; AVX2-FAST-ALL-NEXT: # ymm4 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermd %ymm2, %ymm4, %ymm4 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,12,13,8,9,u,u,u,u,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [2,6,1,5,2,6,1,5] -; AVX2-FAST-ALL-NEXT: # ymm5 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermd %ymm1, %ymm5, %ymm5 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,10,11,14,15,u,u,u,u,u,u,u,u,16,17,20,21,u,u,u,u,u,u,u,u] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3] -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31] -; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [2,6,3,7,2,6,3,7] -; AVX2-FAST-ALL-NEXT: # ymm5 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermd %ymm1, %ymm5, %ymm1 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,6,7,u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,u,u,u,u,26,27,30,31,u,u,u,u] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7] -; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [7,3,3,7,7,3,3,7] +; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %xmm0 +; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %xmm1 +; AVX2-FAST-ALL-NEXT: vinserti128 $1, (%rcx), %ymm0, 
%ymm0 +; AVX2-FAST-ALL-NEXT: vinserti128 $1, (%r9), %ymm1, %ymm1 +; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,4,4,0,0,4,4,0] ; AVX2-FAST-ALL-NEXT: # ymm2 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm2, %ymm0 -; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,8,9,12,13,u,u,u,u,u,u,u,u,22,23,18,19,u,u,u,u,u,u,u,u] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] +; AVX2-FAST-ALL-NEXT: vpermd %ymm1, %ymm2, %ymm2 +; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,12,13,8,9,u,u,u,u,u,u,u,u,18,19,22,23,u,u,u,u,u,u,u,u] +; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7] +; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,1,3] +; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u] +; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm0[1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7] +; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3] +; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31] +; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] ; AVX2-FAST-ALL-NEXT: vmovdqa %ymm0, 64(%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm4, 32(%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm3, (%rax) +; AVX2-FAST-ALL-NEXT: vmovdqa %ymm3, 32(%rax) +; AVX2-FAST-ALL-NEXT: vmovdqa %ymm2, (%rax) ; AVX2-FAST-ALL-NEXT: vzeroupper ; AVX2-FAST-ALL-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: vf8: ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm1 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm2 -; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,20,21,28,29] -; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,1,3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u,18,19,26,27] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm1[0,2,1,3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u,u,u,u,u] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = 
ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31] +; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm0 +; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm1 +; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm0 +; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, (%r9), %ymm1, %ymm1 +; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm1[0,2,0,2] +; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u] +; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7] +; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,1,3] +; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15,u,u,u,u,u,u,u,u,16,17,24,25,u,u,u,u] +; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm0[1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,3,10,11,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3] -; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] +; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,u,u,u,u,22,23,30,31] +; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 64(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm4, 32(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, (%rax) +; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, 32(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, (%rax) ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; @@ -611,517 +521,230 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>* %in.vecptr2, <16 x i16>* %in.vecptr3, <16 x i16>* %in.vecptr4, <16 x i16>* %in.vecptr5, <96 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf16: ; SSE: # %bb.0: -; SSE-NEXT: movdqa (%rdi), %xmm13 -; SSE-NEXT: movdqa 16(%rdi), %xmm7 -; SSE-NEXT: movdqa (%rsi), %xmm9 -; SSE-NEXT: movdqa 16(%rsi), %xmm14 -; SSE-NEXT: movdqa (%rdx), %xmm2 -; SSE-NEXT: movdqa 16(%rdx), %xmm3 -; SSE-NEXT: movdqa (%rcx), %xmm0 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 16(%rcx), %xmm15 -; SSE-NEXT: movdqa 16(%r8), %xmm1 -; SSE-NEXT: movdqa 16(%r9), %xmm5 -; SSE-NEXT: movdqa %xmm1, %xmm4 -; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] -; SSE-NEXT: movdqa %xmm3, %xmm8 -; SSE-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm15[4],xmm8[5],xmm15[5],xmm8[6],xmm15[6],xmm8[7],xmm15[7] -; SSE-NEXT: movdqa %xmm8, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm4[3,3] -; SSE-NEXT: movdqa %xmm7, %xmm12 -; SSE-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7] -; SSE-NEXT: movdqa %xmm12, %xmm10 -; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm4[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,0],xmm6[0,2] -; SSE-NEXT: movdqa 
%xmm12, %xmm6 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm8[1] -; SSE-NEXT: movaps %xmm4, %xmm11 -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,1],xmm8[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm6[0,2] -; SSE-NEXT: movdqa (%r8), %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm12[1,3] -; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm12[0] -; SSE-NEXT: movdqa (%r9), %xmm12 -; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm4[0,2] -; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3] -; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3] +; SSE-NEXT: movdqa (%r8), %xmm11 +; SSE-NEXT: movdqa 16(%r8), %xmm12 +; SSE-NEXT: movdqa (%r9), %xmm3 +; SSE-NEXT: movdqa 16(%r9), %xmm7 +; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,65535,0] +; SSE-NEXT: movdqa %xmm12, %xmm1 +; SSE-NEXT: pand %xmm0, %xmm1 +; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,4,6,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3] +; SSE-NEXT: movdqa %xmm0, %xmm8 +; SSE-NEXT: pandn %xmm2, %xmm8 +; SSE-NEXT: por %xmm1, %xmm8 +; SSE-NEXT: movdqa %xmm11, %xmm2 +; SSE-NEXT: pand %xmm0, %xmm2 +; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm7[0,2,2,3,4,5,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,1] +; SSE-NEXT: movdqa %xmm0, %xmm9 +; SSE-NEXT: pandn %xmm4, %xmm9 +; SSE-NEXT: por %xmm2, %xmm9 +; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,6,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,2,3] +; SSE-NEXT: movdqa %xmm0, %xmm10 +; SSE-NEXT: pandn %xmm4, %xmm10 +; SSE-NEXT: por %xmm1, %xmm10 +; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1] +; SSE-NEXT: pandn %xmm1, %xmm0 +; SSE-NEXT: por %xmm2, %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,65535,65535] +; SSE-NEXT: movdqa %xmm11, %xmm4 +; SSE-NEXT: pand %xmm1, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,2,3,3] +; SSE-NEXT: movdqa %xmm1, %xmm13 +; SSE-NEXT: pandn %xmm2, %xmm13 +; SSE-NEXT: por %xmm4, %xmm13 +; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm7, %xmm5 +; SSE-NEXT: pslld $16, %xmm5 +; SSE-NEXT: movdqa %xmm6, %xmm15 +; SSE-NEXT: pandn %xmm5, %xmm15 +; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm7[0,0,1,1] +; SSE-NEXT: movdqa %xmm1, %xmm5 +; SSE-NEXT: pandn %xmm14, %xmm5 +; SSE-NEXT: pslldq {{.*#+}} xmm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[0,1,2,3,4,5] +; SSE-NEXT: movdqa %xmm6, %xmm2 +; SSE-NEXT: pandn %xmm7, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm3[2,2,3,3] +; SSE-NEXT: movdqa %xmm1, %xmm7 +; SSE-NEXT: pandn %xmm14, %xmm7 +; SSE-NEXT: por %xmm4, %xmm7 ; SSE-NEXT: movdqa %xmm3, %xmm4 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm1[3,3] -; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3] -; SSE-NEXT: movdqa %xmm7, %xmm14 -; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[3,1],xmm1[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm4[0,2] -; SSE-NEXT: movdqa %xmm7, %xmm4 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm3[1] -; SSE-NEXT: movaps %xmm1, %xmm15 -; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,1],xmm3[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[2,0],xmm4[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm7[1,3] -; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm7[0] -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[0,2] 
-; SSE-NEXT: movdqa %xmm6, %xmm1 -; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7] -; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] -; SSE-NEXT: movdqa %xmm4, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm1[3,3] -; SSE-NEXT: movdqa %xmm13, %xmm7 -; SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7] -; SSE-NEXT: movdqa %xmm9, %xmm12 -; SSE-NEXT: movdqa %xmm7, %xmm9 -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,1],xmm1[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm5[0,2] -; SSE-NEXT: movdqa %xmm7, %xmm0 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1] -; SSE-NEXT: movaps %xmm1, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm4[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm7[1,3] -; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm7[0] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[0,2] -; SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload -; SSE-NEXT: # xmm6 = xmm6[0],mem[0],xmm6[1],mem[1],xmm6[2],mem[2],xmm6[3],mem[3] -; SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; SSE-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3] -; SSE-NEXT: movdqa %xmm2, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm6[3,3] -; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3] -; SSE-NEXT: movdqa %xmm13, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm6[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,2] -; SSE-NEXT: movdqa %xmm13, %xmm0 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1] -; SSE-NEXT: movaps %xmm6, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,1],xmm2[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm0[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm13[1,3] -; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm13[0] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[0,2] +; SSE-NEXT: pslld $16, %xmm4 +; SSE-NEXT: movdqa %xmm6, %xmm14 +; SSE-NEXT: pandn %xmm4, %xmm14 +; SSE-NEXT: movdqa %xmm12, %xmm4 +; SSE-NEXT: pand %xmm6, %xmm4 +; SSE-NEXT: por %xmm4, %xmm15 +; SSE-NEXT: pand %xmm1, %xmm12 +; SSE-NEXT: por %xmm4, %xmm14 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,1,1] +; SSE-NEXT: pandn %xmm4, %xmm1 +; SSE-NEXT: por %xmm12, %xmm5 +; SSE-NEXT: por %xmm12, %xmm1 +; SSE-NEXT: pand %xmm6, %xmm11 +; SSE-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5] +; SSE-NEXT: pandn %xmm3, %xmm6 +; SSE-NEXT: por %xmm11, %xmm2 +; SSE-NEXT: por %xmm11, %xmm6 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movaps %xmm2, (%rax) -; SSE-NEXT: movaps %xmm7, 16(%rax) -; SSE-NEXT: movaps %xmm1, 32(%rax) -; SSE-NEXT: movaps %xmm4, 48(%rax) -; SSE-NEXT: movaps %xmm5, 64(%rax) -; SSE-NEXT: movaps %xmm9, 80(%rax) -; SSE-NEXT: movaps %xmm3, 96(%rax) -; SSE-NEXT: movaps %xmm15, 112(%rax) -; SSE-NEXT: movaps %xmm14, 128(%rax) -; SSE-NEXT: movaps %xmm8, 144(%rax) -; SSE-NEXT: movaps %xmm11, 160(%rax) -; SSE-NEXT: movaps %xmm10, 176(%rax) +; SSE-NEXT: movdqa %xmm6, (%rax) +; SSE-NEXT: movdqa %xmm1, 16(%rax) +; SSE-NEXT: movdqa %xmm14, 48(%rax) +; SSE-NEXT: movdqa %xmm7, 64(%rax) +; SSE-NEXT: movdqa %xmm2, 96(%rax) +; SSE-NEXT: movdqa %xmm5, 112(%rax) +; SSE-NEXT: movdqa %xmm15, 144(%rax) +; SSE-NEXT: movdqa %xmm13, 160(%rax) +; 
SSE-NEXT: movdqa %xmm0, 32(%rax) +; SSE-NEXT: movdqa %xmm10, 80(%rax) +; SSE-NEXT: movdqa %xmm9, 128(%rax) +; SSE-NEXT: movdqa %xmm8, 176(%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: vf16: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa 16(%rcx), %xmm0 -; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm1 -; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm15 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm15[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm15[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vmovdqa (%rsi), %xmm8 -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm0 -; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa (%rdi), %xmm3 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm14 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm7[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm2, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7] -; AVX1-NEXT: vmovdqa (%r9), %xmm0 -; AVX1-NEXT: vmovdqa 16(%r9), %xmm10 -; AVX1-NEXT: vmovdqa (%r8), %xmm6 -; AVX1-NEXT: vmovdqa 16(%r8), %xmm5 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm4[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm9, %ymm9 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm9[2],ymm2[3,4],ymm9[5],ymm2[6,7] -; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm13 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm12 = xmm13[2,3,2,3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm8[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm12, %ymm12 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm9[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm6, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm12 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5,6],ymm12[7] -; AVX1-NEXT: vmovdqa (%rcx), %xmm2 -; AVX1-NEXT: vmovdqa (%rdx), %xmm1 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm6[2,2,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[0,0,1,1] -; AVX1-NEXT: vinsertf128 $1, %xmm11, %ymm2, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm2[2],ymm12[3,4],ymm2[5],ymm12[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm8[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm8, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm12 = 
ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7] -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; AVX1-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload -; AVX1-NEXT: # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3],ymm5[4],ymm2[5,6],ymm5[7] -; AVX1-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm5 # 16-byte Folded Reload -; AVX1-NEXT: # xmm5 = xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6],ymm3[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm15[2,2,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm13[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm13, %ymm3, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm9[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm3, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7] ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovaps %ymm1, (%rax) -; AVX1-NEXT: vmovaps %ymm0, 128(%rax) -; AVX1-NEXT: vmovaps %ymm2, 160(%rax) -; AVX1-NEXT: vmovaps %ymm12, 64(%rax) -; AVX1-NEXT: vmovaps %ymm11, 32(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 96(%rax) -; AVX1-NEXT: vzeroupper +; AVX1-NEXT: vmovdqa (%r8), %xmm2 +; AVX1-NEXT: vmovdqa 16(%r8), %xmm3 +; AVX1-NEXT: vmovdqa (%r9), %xmm4 +; AVX1-NEXT: vmovdqa 16(%r9), %xmm5 +; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm5[0,2,2,3,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm2[0],xmm0[1],xmm2[2,3,4,5,6],xmm0[7] +; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,4,6,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm9 = xmm3[0],xmm1[1],xmm3[2,3,4,5,6],xmm1[7] +; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm4[0,2,2,3,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm10 = xmm2[0],xmm6[1],xmm2[2,3,4,5,6],xmm6[7] +; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,4,6,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm11 = 
xmm3[0],xmm7[1],xmm3[2,3,4,5,6],xmm7[7] +; AVX1-NEXT: vpslld $16, %xmm5, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm12 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm13 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[0,0,1,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2],xmm6[3],xmm3[4,5,6,7] +; AVX1-NEXT: vpslldq {{.*#+}} xmm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5] +; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm2[0,1,2,3,4],xmm7[5],xmm2[6,7] +; AVX1-NEXT: vpslld $16, %xmm4, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0,1,2],xmm4[3],xmm2[4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[0,0,1,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5,6,7] +; AVX1-NEXT: vpslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm5[0,1,2,3,4,5] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5],xmm2[6,7] +; AVX1-NEXT: vmovdqa %xmm2, 96(%rax) +; AVX1-NEXT: vmovdqa %xmm1, 112(%rax) +; AVX1-NEXT: vmovdqa %xmm4, 64(%rax) +; AVX1-NEXT: vmovdqa %xmm0, 48(%rax) +; AVX1-NEXT: vmovdqa %xmm7, (%rax) +; AVX1-NEXT: vmovdqa %xmm6, 16(%rax) +; AVX1-NEXT: vmovdqa %xmm13, 160(%rax) +; AVX1-NEXT: vmovdqa %xmm12, 144(%rax) +; AVX1-NEXT: vmovdqa %xmm11, 80(%rax) +; AVX1-NEXT: vmovdqa %xmm10, 32(%rax) +; AVX1-NEXT: vmovdqa %xmm9, 176(%rax) +; AVX1-NEXT: vmovdqa %xmm8, 128(%rax) ; AVX1-NEXT: retq ; ; AVX2-SLOW-LABEL: vf16: ; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm10 -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm11 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm12 -; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm14 -; AVX2-SLOW-NEXT: vmovdqa (%r9), %ymm15 -; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm9 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm9[1,2,2,3] -; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm7 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm7[1,2,2,3] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,2,1] -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm5 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm5[0,1,2,1] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,7,6,5] -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,5] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7] -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm1 -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm4 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm2 -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm6 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3] -; AVX2-SLOW-NEXT: vpbroadcastq %xmm4, %ymm4 -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7] -; AVX2-SLOW-NEXT: 
vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,2,3,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[1,1,1,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm15[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm14[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[8],ymm4[8],ymm6[9],ymm4[9],ymm6[10],ymm4[10],ymm6[11],ymm4[11] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm11[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm6 = ymm6[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm13 = ymm10[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm13 = ymm13[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm13[0],ymm6[0],ymm13[1],ymm6[1],ymm13[2],ymm6[2],ymm13[3],ymm6[3],ymm13[8],ymm6[8],ymm13[9],ymm6[9],ymm13[10],ymm6[10],ymm13[11],ymm6[11] -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm13 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6],ymm6[7] -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm6 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm3 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm6[0],ymm3[1],ymm6[1],ymm3[2],ymm6[2],ymm3[3],ymm6[3],ymm3[8],ymm6[8],ymm3[9],ymm6[9],ymm3[10],ymm6[10],ymm3[11],ymm6[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,0,2,2] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3] -; AVX2-SLOW-NEXT: vpbroadcastq %xmm1, %ymm1 -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm2 = 
ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[8],ymm13[8],ymm12[9],ymm13[9],ymm12[10],ymm13[10],ymm12[11],ymm13[11] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,0,2,2,5,4,6,6] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[8],ymm15[8],ymm14[9],ymm15[9],ymm14[10],ymm15[10],ymm14[11],ymm15[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm14[4],ymm15[4],ymm14[5],ymm15[5],ymm14[6],ymm15[6],ymm14[7],ymm15[7],ymm14[12],ymm15[12],ymm14[13],ymm15[13],ymm14[14],ymm15[14],ymm14[15],ymm15[15] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[12],ymm13[12],ymm12[13],ymm13[13],ymm12[14],ymm13[14],ymm12[15],ymm13[15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-SLOW-NEXT: vmovdqa %ymm2, 160(%rax) +; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm0 +; AVX2-SLOW-NEXT: vmovdqa (%r9), %ymm1 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] +; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm0, %ymm2, %ymm2 +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm1[2,3,2,3,6,7,6,7] +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm4 = ymm4[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0] +; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm4 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm1 +; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm7 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm7[0,0,2,1,4,5,6,7] +; AVX2-SLOW-NEXT: vpbroadcastq %xmm3, %ymm3 +; AVX2-SLOW-NEXT: vpblendvb %ymm6, %ymm0, %ymm3, %ymm3 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm7[0,2,2,3,4,5,6,7] +; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4] +; AVX2-SLOW-NEXT: 
vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1] +; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm0, %ymm6, %ymm6 +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3] +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,2,2,1,4,5,6,7] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1] +; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm7, %ymm0 +; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%rax) ; AVX2-SLOW-NEXT: vmovdqa %ymm1, 96(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm4, 128(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm8, 64(%rax) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm4, 160(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm2, 128(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm0, 64(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm6, 32(%rax) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; -; AVX2-FAST-ALL-LABEL: vf16: -; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %ymm10 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %ymm11 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %ymm14 -; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %ymm13 -; AVX2-FAST-ALL-NEXT: vmovdqa (%r9), %ymm15 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %xmm8 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11] -; AVX2-FAST-ALL-NEXT: vpshufb %xmm0, %xmm8, %xmm1 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %xmm7 -; AVX2-FAST-ALL-NEXT: vpshufb %xmm0, %xmm7, %xmm0 -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vmovdqa (%r9), %xmm5 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm2 = xmm5[1,2,2,3] -; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %xmm0 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,2,2,3] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7] -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %xmm1 -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %xmm4 -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} xmm6 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3] -; AVX2-FAST-ALL-NEXT: vpbroadcastq %xmm3, %ymm3 -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm6 = <1,u,u,2,u,u,3,u> -; AVX2-FAST-ALL-NEXT: vpermd %ymm3, %ymm6, %ymm3 -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,1,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5,6],ymm6[7] -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm6 = ymm6[1,1,1,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm9 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqa 
{{.*#+}} ymm3 = -; AVX2-FAST-ALL-NEXT: vpshufb %ymm3, %ymm11, %ymm6 -; AVX2-FAST-ALL-NEXT: vpshufb %ymm3, %ymm10, %ymm3 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm6[0],ymm3[1],ymm6[1],ymm3[2],ymm6[2],ymm3[3],ymm6[3],ymm3[8],ymm6[8],ymm3[9],ymm6[9],ymm3[10],ymm6[10],ymm3[11],ymm6[11] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm6 = ymm15[1,2,2,3,5,6,6,7] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm12 = ymm13[1,2,2,3,5,6,6,7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm12[0],ymm6[0],ymm12[1],ymm6[1],ymm12[2],ymm6[2],ymm12[3],ymm6[3],ymm12[8],ymm6[8],ymm12[9],ymm6[9],ymm12[10],ymm6[10],ymm12[11],ymm6[11] -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %ymm12 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0],ymm3[1],ymm6[2,3],ymm3[4],ymm6[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm6 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm2 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[2],ymm6[2],ymm2[3],ymm6[3],ymm2[8],ymm6[8],ymm2[9],ymm6[9],ymm2[10],ymm6[10],ymm2[11],ymm6[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] -; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,0,2,2,1,0,2,2] -; AVX2-FAST-ALL-NEXT: # ymm2 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermd %ymm1, %ymm2, %ymm1 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] -; AVX2-FAST-ALL-NEXT: vpbroadcastq %xmm0, %ymm0 -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm14[0],ymm12[0],ymm14[1],ymm12[1],ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[8],ymm12[8],ymm14[9],ymm12[9],ymm14[10],ymm12[10],ymm14[11],ymm12[11] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm2 = -; AVX2-FAST-ALL-NEXT: vpermd %ymm1, %ymm2, %ymm1 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[8],ymm15[8],ymm13[9],ymm15[9],ymm13[10],ymm15[10],ymm13[11],ymm15[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7] -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm2 = 
ymm14[4],ymm12[4],ymm14[5],ymm12[5],ymm14[6],ymm12[6],ymm14[7],ymm12[7],ymm14[12],ymm12[12],ymm14[13],ymm12[13],ymm14[14],ymm12[14],ymm14[15],ymm12[15] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm4 = <5,u,u,6,u,u,7,u> -; AVX2-FAST-ALL-NEXT: vpermd %ymm2, %ymm4, %ymm2 -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm13[4],ymm15[4],ymm13[5],ymm15[5],ymm13[6],ymm15[6],ymm13[7],ymm15[7],ymm13[12],ymm15[12],ymm13[13],ymm15[13],ymm13[14],ymm15[14],ymm13[15],ymm15[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7] -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm4[3,3,3,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7] -; AVX2-FAST-ALL-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm2, 160(%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm1, 96(%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm0, (%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm3, 128(%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm9, 64(%rax) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 32(%rax) -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: vf16: -; AVX2-FAST-PERLANE: # %bb.0: -; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm13 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm15 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm11 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm12 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm14 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r9), %ymm9 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm6 -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11] -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm6, %xmm7 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1 -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm1, %xmm0 -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm0[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r9), %xmm7 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm7[1,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm0 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0],ymm8[1],ymm2[2,3],ymm8[4],ymm2[5,6],ymm8[7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm3 -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm2 -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} xmm5 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm4, %ymm4 -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3,4],ymm4[5],ymm8[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; 
AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,2,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[1,1,1,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3] -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm0, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[8],ymm15[8],ymm13[9],ymm15[9],ymm13[10],ymm15[10],ymm13[11],ymm15[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm14[0],ymm9[0],ymm14[1],ymm9[1],ymm14[2],ymm9[2],ymm14[3],ymm9[3],ymm14[8],ymm9[8],ymm14[9],ymm9[9],ymm14[10],ymm9[10],ymm14[11],ymm9[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm14[4],ymm9[4],ymm14[5],ymm9[5],ymm14[6],ymm9[6],ymm14[7],ymm9[7],ymm14[12],ymm9[12],ymm14[13],ymm9[13],ymm14[14],ymm9[14],ymm14[15],ymm9[15] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,2,3,6,5,6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: 
vpunpckhwd {{.*#+}} ymm2 = ymm13[4],ymm15[4],ymm13[5],ymm15[5],ymm13[6],ymm15[6],ymm13[7],ymm15[7],ymm13[12],ymm15[12],ymm13[13],ymm15[13],ymm13[14],ymm15[14],ymm13[15],ymm15[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm15, %ymm3 -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm13, %ymm2 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm3 = ymm9[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm4 = ymm14[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7] -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm3 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm4 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 128(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 160(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 96(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, (%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm10, 64(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax) -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-FAST-LABEL: vf16: +; AVX2-FAST: # %bb.0: +; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm0 +; AVX2-FAST-NEXT: vmovdqa (%r9), %ymm1 +; AVX2-FAST-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm0, %ymm2, %ymm2 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm4 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = 
ymm1[2,1,2,3] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0] +; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm1 +; AVX2-FAST-NEXT: vmovdqa (%r9), %xmm7 +; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm7[0,0,2,1,4,5,6,7] +; AVX2-FAST-NEXT: vpbroadcastq %xmm3, %ymm3 +; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm0, %ymm3, %ymm3 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1] +; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm8, %ymm5 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1] +; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm0, %ymm7, %ymm0 +; AVX2-FAST-NEXT: vmovdqa %ymm3, (%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm1, 160(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm4, 128(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm2, 96(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm0, 64(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm5, 32(%rax) +; AVX2-FAST-NEXT: vzeroupper +; AVX2-FAST-NEXT: retq ; ; AVX512-LABEL: vf16: ; AVX512: # %bb.0: ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512-NEXT: vmovdqa (%rdx), %ymm1 -; AVX512-NEXT: vmovdqa (%r8), %ymm2 -; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1 -; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,16,32,48,u,u,1,17,33,49,u,u,2,18,34,50,u,u,3,19,35,51,u,u,4,20,36,52,u,u,5,21> +; AVX512-NEXT: vmovdqa (%rdx), %ymm0 +; AVX512-NEXT: vmovdqa (%r8), %ymm1 +; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm0, %zmm0 +; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm1, %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,32,48,6,7,8,9,33,49,12,13,14,15,34,50,18,19,20,21,35,51,24,25,26,27,36,52,30,31] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,37,53,4,5,6,7,38,54,10,11,12,13,39,55,16,17,18,19,40,56,22,23,24,25,41,57,28,29,30,31] ; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm3 -; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,32,48,6,7,8,9,33,49,12,13,14,15,34,50,18,19,20,21,35,51,24,25,26,27,36,52,30,31] -; AVX512-NEXT: vpermi2w %zmm2, %zmm3, %zmm4 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <5,21,37,53,u,u,6,22,38,54,u,u,7,23,39,55,u,u,8,24,40,56,u,u,9,25,41,57,u,u,10,26> -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,38,54,6,7,8,9,39,55,12,13,14,15,40,56,18,19,20,21,41,57,24,25,26,27,42,58,30,31] -; AVX512-NEXT: vpermi2w %zmm0, %zmm3, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,26,43,59,u,u,11,27,44,60,u,u,12,28,45,61,u,u,13,29,46,62,u,u,14,30,47,63,u,u,15,31> -; AVX512-NEXT: vpermi2w %zmm0, %zmm2, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,43,59,6,7,8,9,44,60,12,13,14,15,45,61,18,19,20,21,46,62,24,25,26,27,47,63,30,31] -; AVX512-NEXT: vpermi2w %zmm1, %zmm3, %zmm0 -; AVX512-NEXT: vmovdqu64 %zmm0, 128(%rax) -; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rax) -; AVX512-NEXT: vmovdqu64 %zmm4, (%rax) +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [42,58,2,3,4,5,43,59,8,9,10,11,44,60,14,15,16,17,45,61,20,21,22,23,46,62,26,27,28,29,47,63] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rax) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rax) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <16 x i16>, <16 x 
i16>* %in.vecptr0, align 32 @@ -1147,1143 +770,487 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>* %in.vecptr2, <32 x i16>* %in.vecptr3, <32 x i16>* %in.vecptr4, <32 x i16>* %in.vecptr5, <192 x i16>* %out.vec) nounwind { ; SSE-LABEL: vf32: ; SSE: # %bb.0: -; SSE-NEXT: subq $104, %rsp -; SSE-NEXT: movdqa (%rdi), %xmm9 -; SSE-NEXT: movdqa 16(%rdi), %xmm12 -; SSE-NEXT: movdqa (%rsi), %xmm10 -; SSE-NEXT: movdqa 16(%rsi), %xmm8 -; SSE-NEXT: movdqa (%rdx), %xmm1 -; SSE-NEXT: movdqa 16(%rdx), %xmm6 -; SSE-NEXT: movdqa (%rcx), %xmm13 -; SSE-NEXT: movdqa 16(%rcx), %xmm11 -; SSE-NEXT: movdqa (%r8), %xmm5 -; SSE-NEXT: movdqa (%r9), %xmm14 -; SSE-NEXT: movdqa %xmm9, %xmm4 -; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3] -; SSE-NEXT: movdqa %xmm5, %xmm3 -; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3] -; SSE-NEXT: movdqa %xmm3, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm4[1,3] -; SSE-NEXT: movdqa %xmm1, %xmm2 -; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3] -; SSE-NEXT: movdqa %xmm2, %xmm0 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[0,2] -; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm4, %xmm7 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm7 = xmm7[1],xmm2[1] -; SSE-NEXT: movdqa %xmm3, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[0,2] -; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 16(%r8), %xmm15 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm3[2,3] -; SSE-NEXT: movdqa 16(%r9), %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm2[0,2] -; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] -; SSE-NEXT: movdqa %xmm5, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm9[1,3] -; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7] -; SSE-NEXT: movdqa %xmm1, %xmm2 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm9[0] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm9, %xmm0 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] -; SSE-NEXT: movdqa %xmm5, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm5[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,1],xmm5[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm1[0,2] -; SSE-NEXT: movaps %xmm9, (%rsp) # 16-byte Spill -; SSE-NEXT: movdqa %xmm12, %xmm13 -; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3] -; SSE-NEXT: movdqa %xmm15, %xmm0 -; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm13[1,3] -; SSE-NEXT: movdqa %xmm6, %xmm2 -; 
SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3] -; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm13[0] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[0,2] -; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm13, %xmm1 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1] -; SSE-NEXT: movdqa %xmm0, %xmm4 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm2[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[0,2] -; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm0[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[3,1],xmm0[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,0],xmm2[0,2] -; SSE-NEXT: movdqa 32(%rdi), %xmm10 -; SSE-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm3[4],xmm15[5],xmm3[5],xmm15[6],xmm3[6],xmm15[7],xmm3[7] -; SSE-NEXT: movdqa %xmm15, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm12[1,3] -; SSE-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7] -; SSE-NEXT: movdqa %xmm6, %xmm1 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm12[0] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,2] -; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm12, %xmm0 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm6[1] -; SSE-NEXT: movdqa %xmm15, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm6[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,2] -; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 32(%rsi), %xmm8 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm15[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[3,1],xmm15[2,3] -; SSE-NEXT: movdqa 32(%r8), %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm6[0,2] -; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm10, %xmm9 -; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3] +; SSE-NEXT: subq $136, %rsp +; SSE-NEXT: movdqa (%r8), %xmm4 +; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa 16(%r8), %xmm8 +; SSE-NEXT: movdqa 48(%r8), %xmm0 +; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa (%r9), %xmm9 +; SSE-NEXT: movdqa 16(%r9), %xmm12 ; SSE-NEXT: movdqa 32(%r9), %xmm3 -; SSE-NEXT: movdqa %xmm0, %xmm4 -; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE-NEXT: movdqa %xmm4, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm9[1,3] -; SSE-NEXT: movdqa 32(%rdx), %xmm7 -; SSE-NEXT: movdqa 32(%rcx), %xmm6 -; SSE-NEXT: movdqa %xmm7, %xmm1 -; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3] -; SSE-NEXT: movdqa %xmm1, %xmm2 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm9[0] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm5[0,2] -; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm9, %xmm5 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm1[1] +; SSE-NEXT: movdqa 48(%r9), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,0,65535,65535,65535,65535,65535,0] +; SSE-NEXT: pand %xmm15, %xmm0 +; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,6,6,7] +; SSE-NEXT: pshufd {{.*#+}} 
xmm2 = xmm2[2,1,2,3] +; SSE-NEXT: movdqa %xmm15, %xmm5 +; SSE-NEXT: pandn %xmm2, %xmm5 +; SSE-NEXT: por %xmm0, %xmm5 +; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa %xmm4, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm5[0,2] -; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm4[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,1],xmm4[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm1[0,2] -; SSE-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7] -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm10[1,3] -; SSE-NEXT: movdqa %xmm7, %xmm12 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm12 = xmm12[0],xmm10[0] -; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm2[0,2] -; SSE-NEXT: movdqa %xmm10, %xmm2 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm7[1] -; SSE-NEXT: movdqa %xmm0, %xmm11 -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,1],xmm7[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm2[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm0[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm0[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,0],xmm7[0,2] -; SSE-NEXT: movdqa 48(%rdi), %xmm0 -; SSE-NEXT: movdqa 48(%rsi), %xmm1 +; SSE-NEXT: pand %xmm15, %xmm2 +; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm1[0,2,2,3,4,5,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,1] +; SSE-NEXT: movdqa %xmm15, %xmm6 +; SSE-NEXT: pandn %xmm5, %xmm6 +; SSE-NEXT: por %xmm2, %xmm6 +; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,6,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3] +; SSE-NEXT: movdqa %xmm15, %xmm6 +; SSE-NEXT: pandn %xmm5, %xmm6 +; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[0,2,2,3,4,5,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,1] +; SSE-NEXT: movdqa %xmm15, %xmm4 +; SSE-NEXT: pandn %xmm5, %xmm4 +; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm12[0,1,2,3,4,6,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3] +; SSE-NEXT: movdqa %xmm15, %xmm7 +; SSE-NEXT: pandn %xmm5, %xmm7 +; SSE-NEXT: por %xmm0, %xmm7 +; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[0,2,2,3,4,5,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] +; SSE-NEXT: movdqa %xmm15, %xmm5 +; SSE-NEXT: pandn %xmm0, %xmm5 +; SSE-NEXT: por %xmm2, %xmm5 +; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa %xmm9, %xmm7 +; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm9[0,1,2,3,4,6,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] +; SSE-NEXT: movdqa %xmm15, %xmm2 +; SSE-NEXT: pandn %xmm0, %xmm2 +; SSE-NEXT: movdqa %xmm8, %xmm5 +; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa %xmm8, %xmm0 +; SSE-NEXT: pand %xmm15, %xmm0 +; SSE-NEXT: por %xmm0, %xmm6 +; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: por %xmm0, %xmm2 +; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa 32(%r8), %xmm9 +; SSE-NEXT: movdqa %xmm9, %xmm0 +; SSE-NEXT: pand %xmm15, %xmm0 +; 
SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[0,2,2,3,4,5,6,7] +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1] +; SSE-NEXT: pandn %xmm2, %xmm15 +; SSE-NEXT: por %xmm0, %xmm4 +; SSE-NEXT: movdqa %xmm4, (%rsp) # 16-byte Spill +; SSE-NEXT: por %xmm0, %xmm15 +; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,0,65535,65535,65535,65535] +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3] +; SSE-NEXT: movdqa %xmm14, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: pslld $16, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm6, %xmm4 +; SSE-NEXT: pandn %xmm2, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,1] +; SSE-NEXT: movdqa %xmm14, %xmm7 +; SSE-NEXT: pandn %xmm2, %xmm7 +; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5] +; SSE-NEXT: movdqa %xmm6, %xmm2 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,2,3,3] +; SSE-NEXT: movdqa %xmm14, %xmm2 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa %xmm3, %xmm1 +; SSE-NEXT: pslld $16, %xmm1 +; SSE-NEXT: movdqa %xmm6, %xmm13 +; SSE-NEXT: pandn %xmm1, %xmm13 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,0,1,1] +; SSE-NEXT: movdqa %xmm14, %xmm11 +; SSE-NEXT: pandn %xmm1, %xmm11 +; SSE-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5] +; SSE-NEXT: movdqa %xmm6, %xmm10 +; SSE-NEXT: pandn %xmm3, %xmm10 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,2,3,3] +; SSE-NEXT: movdqa %xmm14, %xmm8 +; SSE-NEXT: pandn %xmm1, %xmm8 +; SSE-NEXT: movdqa %xmm9, %xmm1 +; SSE-NEXT: pand %xmm14, %xmm1 +; SSE-NEXT: por %xmm1, %xmm0 +; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: por %xmm1, %xmm8 +; SSE-NEXT: movdqa %xmm12, %xmm1 +; SSE-NEXT: pslld $16, %xmm1 +; SSE-NEXT: movdqa %xmm6, %xmm3 +; SSE-NEXT: pandn %xmm1, %xmm3 +; SSE-NEXT: movdqa %xmm5, %xmm1 +; SSE-NEXT: pand %xmm6, %xmm1 +; SSE-NEXT: por %xmm1, %xmm4 +; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: por %xmm1, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm12[0,0,1,1] +; SSE-NEXT: movdqa %xmm14, %xmm2 +; SSE-NEXT: pandn %xmm7, %xmm2 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload +; SSE-NEXT: movdqa %xmm5, %xmm7 +; SSE-NEXT: pand %xmm14, %xmm7 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: por %xmm7, %xmm0 +; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: por %xmm7, %xmm2 +; SSE-NEXT: pslldq {{.*#+}} xmm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm12[0,1,2,3,4,5] +; SSE-NEXT: movdqa %xmm6, %xmm7 +; SSE-NEXT: pandn %xmm12, %xmm7 +; SSE-NEXT: pand %xmm6, %xmm9 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: por %xmm9, %xmm0 +; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: por %xmm9, %xmm7 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,2,3,3] +; SSE-NEXT: movdqa %xmm14, %xmm12 +; SSE-NEXT: pandn %xmm9, %xmm12 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload +; SSE-NEXT: movdqa %xmm4, %xmm9 +; SSE-NEXT: pand %xmm14, %xmm9 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; 
SSE-NEXT: por %xmm9, %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE-NEXT: movdqa 48(%r8), %xmm7 -; SSE-NEXT: movdqa 48(%r9), %xmm15 -; SSE-NEXT: movdqa %xmm7, %xmm4 -; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3] -; SSE-NEXT: movdqa %xmm4, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[1,3] -; SSE-NEXT: movdqa 48(%rdx), %xmm1 -; SSE-NEXT: movdqa 48(%rcx), %xmm14 -; SSE-NEXT: movdqa %xmm1, %xmm6 -; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3] -; SSE-NEXT: movdqa %xmm6, %xmm8 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm2[0] -; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm5[0,2] -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm6[1] -; SSE-NEXT: movdqa %xmm4, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm6[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm4[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm4[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[0,2] -; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload -; SSE-NEXT: # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm15[4],xmm7[5],xmm15[5],xmm7[6],xmm15[6],xmm7[7],xmm15[7] -; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] -; SSE-NEXT: movdqa %xmm7, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[1,3] -; SSE-NEXT: movdqa %xmm1, %xmm4 -; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[0,2] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1] -; SSE-NEXT: movdqa %xmm7, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm1[1,1] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm3[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm7[3,3] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm7[2,3] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,2] +; SSE-NEXT: por %xmm9, %xmm12 +; SSE-NEXT: movdqa %xmm0, %xmm9 +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: pslld $16, %xmm9 +; SSE-NEXT: movdqa %xmm6, %xmm0 +; SSE-NEXT: pandn %xmm9, %xmm0 +; SSE-NEXT: pand %xmm6, %xmm5 +; SSE-NEXT: por %xmm5, %xmm13 +; SSE-NEXT: por %xmm5, %xmm0 +; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload +; SSE-NEXT: pand %xmm14, %xmm9 +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,1,1] +; SSE-NEXT: pandn %xmm5, %xmm14 +; SSE-NEXT: por %xmm9, %xmm11 +; SSE-NEXT: por %xmm9, %xmm14 +; SSE-NEXT: pand %xmm6, %xmm4 +; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5] +; SSE-NEXT: pandn %xmm1, %xmm6 +; SSE-NEXT: por %xmm4, %xmm10 +; SSE-NEXT: por %xmm4, %xmm6 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movaps %xmm0, 368(%rax) -; SSE-NEXT: movaps %xmm6, 352(%rax) -; SSE-NEXT: movaps %xmm4, 336(%rax) -; SSE-NEXT: movaps %xmm2, 320(%rax) -; SSE-NEXT: movaps %xmm5, 304(%rax) -; SSE-NEXT: movaps %xmm8, 288(%rax) -; SSE-NEXT: movaps %xmm10, 272(%rax) -; SSE-NEXT: movaps %xmm11, 256(%rax) -; SSE-NEXT: movaps %xmm12, 240(%rax) -; SSE-NEXT: movaps %xmm9, 224(%rax) +; SSE-NEXT: movdqa %xmm6, (%rax) +; SSE-NEXT: movdqa %xmm14, 16(%rax) +; 
SSE-NEXT: movdqa %xmm0, 48(%rax) +; SSE-NEXT: movdqa %xmm12, 64(%rax) +; SSE-NEXT: movdqa %xmm7, 96(%rax) +; SSE-NEXT: movdqa %xmm2, 112(%rax) +; SSE-NEXT: movdqa %xmm3, 144(%rax) +; SSE-NEXT: movdqa %xmm8, 160(%rax) +; SSE-NEXT: movdqa %xmm10, 192(%rax) +; SSE-NEXT: movdqa %xmm11, 208(%rax) +; SSE-NEXT: movdqa %xmm13, 240(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 208(%rax) +; SSE-NEXT: movaps %xmm0, 256(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 192(%rax) +; SSE-NEXT: movaps %xmm0, 288(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 176(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 160(%rax) +; SSE-NEXT: movaps %xmm0, 304(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 144(%rax) -; SSE-NEXT: movaps %xmm13, 128(%rax) +; SSE-NEXT: movaps %xmm0, 336(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 112(%rax) +; SSE-NEXT: movaps %xmm0, 352(%rax) +; SSE-NEXT: movdqa %xmm15, 32(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 96(%rax) -; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 80(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 64(%rax) +; SSE-NEXT: movaps %xmm0, 128(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 48(%rax) +; SSE-NEXT: movaps %xmm0, 176(%rax) +; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 224(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 32(%rax) +; SSE-NEXT: movaps %xmm0, 272(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 16(%rax) +; SSE-NEXT: movaps %xmm0, 320(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, (%rax) -; SSE-NEXT: addq $104, %rsp +; SSE-NEXT: movaps %xmm0, 368(%rax) +; SSE-NEXT: addq $136, %rsp ; SSE-NEXT: retq ; ; AVX1-LABEL: vf32: ; AVX1: # %bb.0: -; AVX1-NEXT: subq $88, %rsp -; AVX1-NEXT: vmovdqa 16(%rcx), %xmm0 -; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm1 -; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm3 -; AVX1-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa 32(%rsi), %xmm1 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm4 -; AVX1-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm10 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm10[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm3, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7] -; AVX1-NEXT: vmovdqa 16(%r9), %xmm5 -; AVX1-NEXT: vmovdqa %xmm5, 
{{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: subq $24, %rsp +; AVX1-NEXT: vmovdqa 32(%r8), %xmm0 +; AVX1-NEXT: vmovdqa 48(%r8), %xmm7 +; AVX1-NEXT: vmovdqa (%r9), %xmm1 +; AVX1-NEXT: vmovdqa 16(%r9), %xmm4 ; AVX1-NEXT: vmovdqa 32(%r9), %xmm3 -; AVX1-NEXT: vmovdqa 16(%r8), %xmm6 -; AVX1-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa 32(%r8), %xmm4 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm14 = xmm9[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm14, %ymm14 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm14[2],ymm0[3,4],ymm14[5],ymm0[6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm14[2,3,2,3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm15 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm15[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7] -; AVX1-NEXT: vmovdqa 32(%rcx), %xmm4 -; AVX1-NEXT: vmovdqa 32(%rdx), %xmm0 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm8[2,2,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm13 = xmm0[0,0,1,1] -; AVX1-NEXT: vinsertf128 $1, %xmm13, %ymm4, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX1-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovdqa 48(%rcx), %xmm13 -; AVX1-NEXT: vmovdqa 48(%rdx), %xmm7 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[1,1,2,2] +; AVX1-NEXT: vmovdqa 48(%r9), %xmm6 +; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,4,6,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0],xmm2[1],xmm7[2,3,4,5,6],xmm2[7] +; AVX1-NEXT: vmovdqa %xmm2, (%rsp) # 16-byte Spill +; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,2,2,3,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3,4,5,6],xmm2[7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 
16-byte Spill +; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,4,6,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0],xmm2[1],xmm7[2,3,4,5,6],xmm2[7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[0,2,2,3,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3,4,5,6],xmm2[7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm6[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm3 -; AVX1-NEXT: vmovdqa 48(%r9), %xmm5 -; AVX1-NEXT: vmovdqa 48(%r8), %xmm4 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm12 = xmm2[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm12, %ymm2, %ymm12 -; AVX1-NEXT: vblendps {{.*#+}} ymm12 = ymm3[0],ymm12[1],ymm3[2,3],ymm12[4],ymm3[5,6],ymm12[7] -; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3 -; AVX1-NEXT: vmovdqa 48(%rdi), %xmm1 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm11 = xmm0[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11 -; AVX1-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm11[2],ymm12[3,4],ymm11[5],ymm12[6,7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm6[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm8[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm8[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm14[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm14, %ymm4, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6],ymm1[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm15[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm15, %ymm4, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm14 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm15 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] -; AVX1-NEXT: vmovdqa (%rsi), %xmm0 -; AVX1-NEXT: vmovdqa (%rdi), %xmm1 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = 
xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm8[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovdqa (%r9), %xmm4 -; AVX1-NEXT: vmovdqa (%r8), %xmm5 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm0[1],ymm4[2,3],ymm0[4],ymm4[5,6],ymm0[7] -; AVX1-NEXT: vmovdqa (%rcx), %xmm6 -; AVX1-NEXT: vmovdqa (%rdx), %xmm7 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3] -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm6[0,0,1,1] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm7, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm5[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm6[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload -; AVX1-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload -; AVX1-NEXT: # xmm2 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7] -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload -; AVX1-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm5 # 16-byte Folded Reload -; AVX1-NEXT: # xmm5 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6 -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm2, %ymm7 -; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7] -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload -; AVX1-NEXT: vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm7 # 16-byte Folded Reload -; AVX1-NEXT: # xmm7 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm7, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm10[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6 -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm9[2,3,2,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = 
xmm2[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm7, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2,3],ymm6[4],ymm2[5,6],ymm6[7] -; AVX1-NEXT: vpermilps $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload -; AVX1-NEXT: # xmm6 = mem[2,2,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm8[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm5, %ymm5 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6],ymm0[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm5, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm6[0,1,2,3,4,5] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4],xmm5[5],xmm0[6,7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpslld $16, %xmm3, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3,4],xmm2[5],xmm7[6,7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm5[3],xmm0[4,5,6,7] +; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5] +; AVX1-NEXT: vpblendw {{.*#+}} xmm9 = xmm0[0,1,2,3,4],xmm2[5],xmm0[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm4[0,0,1,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm11 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7] +; AVX1-NEXT: vpslld $16, %xmm1, %xmm8 +; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm7[0,1,2,3,4],xmm8[5],xmm7[6,7] +; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,6,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,2,3] +; AVX1-NEXT: vmovdqa 16(%r8), %xmm7 +; AVX1-NEXT: vpblendw {{.*#+}} xmm12 = xmm7[0],xmm5[1],xmm7[2,3,4,5,6],xmm5[7] +; AVX1-NEXT: vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,4,6,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm10[2,1,2,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm10 = xmm7[0],xmm5[1],xmm7[2,3,4,5,6],xmm5[7] +; AVX1-NEXT: vpslld $16, %xmm6, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm7[0,1,2,3,4],xmm0[5],xmm7[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[0,0,1,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7] +; AVX1-NEXT: vpslld $16, %xmm4, %xmm5 +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3,4],xmm5[5],xmm7[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm14 = xmm1[0,0,1,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm14 = xmm7[0,1,2],xmm14[3],xmm7[4,5,6,7] +; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,1] +; AVX1-NEXT: vmovdqa (%r8), %xmm7 +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2,3,4,5,6],xmm6[7] +; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3,4,5,6],xmm4[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm15 = 
xmm3[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm15 = xmm7[0,1,2],xmm15[3],xmm7[4,5,6,7] +; AVX1-NEXT: vpslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3,4],xmm3[5],xmm7[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm13 = xmm1[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm13 = xmm7[0,1,2],xmm13[3],xmm7[4,5,6,7] +; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5],xmm7[6,7] ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovaps %ymm0, (%rax) -; AVX1-NEXT: vmovaps %ymm2, 128(%rax) -; AVX1-NEXT: vmovaps %ymm4, 160(%rax) -; AVX1-NEXT: vmovaps %ymm3, 64(%rax) -; AVX1-NEXT: vmovaps %ymm11, 32(%rax) -; AVX1-NEXT: vmovaps %ymm15, 288(%rax) -; AVX1-NEXT: vmovaps %ymm14, 192(%rax) -; AVX1-NEXT: vmovaps %ymm13, 320(%rax) -; AVX1-NEXT: vmovaps %ymm12, 352(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 256(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 224(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 96(%rax) -; AVX1-NEXT: addq $88, %rsp -; AVX1-NEXT: vzeroupper +; AVX1-NEXT: vmovdqa %xmm1, (%rax) +; AVX1-NEXT: vmovdqa %xmm14, 16(%rax) +; AVX1-NEXT: vmovdqa %xmm8, 48(%rax) +; AVX1-NEXT: vmovdqa %xmm13, 64(%rax) +; AVX1-NEXT: vmovdqa %xmm9, 96(%rax) +; AVX1-NEXT: vmovdqa %xmm11, 112(%rax) +; AVX1-NEXT: vmovdqa %xmm5, 144(%rax) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm1, 160(%rax) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm1, 240(%rax) +; AVX1-NEXT: vmovdqa %xmm3, 192(%rax) +; AVX1-NEXT: vmovdqa %xmm2, 208(%rax) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm1, 288(%rax) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm1, 304(%rax) +; AVX1-NEXT: vmovdqa %xmm15, 256(%rax) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm1, 352(%rax) +; AVX1-NEXT: vmovdqa %xmm0, 336(%rax) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm0, 32(%rax) +; AVX1-NEXT: vmovdqa %xmm10, 80(%rax) +; AVX1-NEXT: vmovdqa %xmm4, 128(%rax) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm0, 176(%rax) +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm0, 224(%rax) +; AVX1-NEXT: vmovdqa %xmm12, 272(%rax) +; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload +; AVX1-NEXT: vmovaps %xmm0, 368(%rax) +; AVX1-NEXT: vmovdqa %xmm6, 320(%rax) +; AVX1-NEXT: addq $24, %rsp ; AVX1-NEXT: retq ; ; AVX2-SLOW-LABEL: vf32: ; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: subq $296, %rsp # imm = 0x128 -; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm10 -; AVX2-SLOW-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-SLOW-NEXT: vmovdqa 32(%r9), %xmm9 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm9[1,2,2,3] -; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm11 -; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %xmm8 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm8[1,2,2,3] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 
= ymm0[0,0,2,1] -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm15 -; AVX2-SLOW-NEXT: vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %xmm5 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[0,1,2,1] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5] -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm12 -; AVX2-SLOW-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm3 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,1,2,1] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm13 -; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %xmm2 -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm7 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm14 -; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %xmm1 -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm6 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3] -; AVX2-SLOW-NEXT: vpbroadcastq %xmm6, %ymm6 -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7] +; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm2 +; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %ymm1 +; AVX2-SLOW-NEXT: vmovdqa (%r9), %ymm9 +; AVX2-SLOW-NEXT: vmovdqa 32(%r9), %ymm10 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm10[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] +; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm2, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm10[1,2,2,3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm11[1,2,2,3] -; AVX2-SLOW-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm15[0,1,2,1] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,5] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm12[0,1,2,1] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,5] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6],ymm6[7] -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm6 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm7 = xmm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3] +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm10[2,3,2,3,6,7,6,7] +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm3 = ymm3[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15] 
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0] +; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm1, %ymm3, %ymm12 +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm5 = ymm9[2,3,2,3,6,7,6,7] +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm5[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3] +; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm1, %ymm5, %ymm13 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm6 = ymm9[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] +; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3] +; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm2, %ymm6, %ymm15 +; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm0 +; AVX2-SLOW-NEXT: vmovdqa 32(%r9), %xmm3 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm0[0,0,2,1,4,5,6,7] +; AVX2-SLOW-NEXT: vpbroadcastq %xmm5, %ymm5 +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm14 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm14, %ymm2, %ymm5, %ymm8 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm10[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2] +; AVX2-SLOW-NEXT: vpblendvb %ymm14, %ymm1, %ymm5, %ymm10 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm3[0,0,2,1,4,5,6,7] ; AVX2-SLOW-NEXT: vpbroadcastq %xmm6, %ymm6 -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3] +; AVX2-SLOW-NEXT: vpblendvb %ymm14, %ymm2, %ymm6, %ymm6 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm9 = ymm9[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2] +; AVX2-SLOW-NEXT: vpblendvb %ymm14, %ymm1, %ymm9, %ymm9 +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3] +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,2,2,1,4,5,6,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,2,3,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[1,1,1,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %ymm0 -; AVX2-SLOW-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa 32(%r9), %ymm4 -; AVX2-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm0[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[8],ymm4[8],ymm6[9],ymm4[9],ymm6[10],ymm4[10],ymm6[11],ymm4[11] -; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %ymm0 -; 
AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm0[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm6 = ymm6[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15] -; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm0 -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = ymm0[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm7 = ymm7[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[8],ymm6[8],ymm7[9],ymm6[9],ymm7[10],ymm6[10],ymm7[11],ymm6[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6],ymm6[7] -; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %ymm10 -; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %ymm4 -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm0 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm15 = ymm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm10[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[8],ymm0[8],ymm15[9],ymm0[9],ymm15[10],ymm0[10],ymm15[11],ymm0[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7] -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm2, %ymm4, %ymm4 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7] +; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1] +; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm1, %ymm3, %ymm3 +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[2,3,2,3] +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,2,2,1,4,5,6,7] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1] +; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm2, %ymm5, %ymm2 +; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] +; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,0,2,2] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3] -; AVX2-SLOW-NEXT: vpbroadcastq %xmm1, %ymm1 -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm11[4],xmm3[4],xmm11[5],xmm3[5],xmm11[6],xmm3[6],xmm11[7],xmm3[7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm1 = 
xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7] -; AVX2-SLOW-NEXT: vmovdqa %xmm13, %xmm12 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,2,3,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7] -; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload -; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm6 -; AVX2-SLOW-NEXT: vmovdqa (%r9), %ymm5 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm5[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm6[1,2,2,3,5,6,6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11] -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm13 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm13[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15] -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm11 -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm8 = ymm11[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm8[0],ymm1[0],ymm8[1],ymm1[1],ymm8[2],ymm1[2],ymm8[3],ymm1[3],ymm8[8],ymm1[8],ymm8[9],ymm1[9],ymm8[10],ymm1[10],ymm8[11],ymm1[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm1 -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm0 -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm9 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} ymm15 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm9 = ymm15[0],ymm9[0],ymm15[1],ymm9[1],ymm15[2],ymm9[2],ymm15[3],ymm9[3],ymm15[8],ymm9[8],ymm15[9],ymm9[9],ymm15[10],ymm9[10],ymm15[11],ymm9[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7] -; AVX2-SLOW-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[1,0,2,2] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7] -; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; AVX2-SLOW-NEXT: vpbroadcastq %xmm7, %ymm7 -; AVX2-SLOW-NEXT: 
vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm11[0],ymm13[0],ymm11[1],ymm13[1],ymm11[2],ymm13[2],ymm11[3],ymm13[3],ymm11[8],ymm13[8],ymm11[9],ymm13[9],ymm11[10],ymm13[10],ymm11[11],ymm13[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm9 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[1,0,2,2,5,4,6,6] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0],ymm9[1],ymm7[2,3],ymm9[4],ymm7[5,6],ymm9[7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm9 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm7[0,1],ymm9[2],ymm7[3,4],ymm9[5],ymm7[6,7] -; AVX2-SLOW-NEXT: vmovdqu (%rsp), %ymm3 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm7 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm15 = ymm10[4],ymm4[4],ymm10[5],ymm4[5],ymm10[6],ymm4[6],ymm10[7],ymm4[7],ymm10[12],ymm4[12],ymm10[13],ymm4[13],ymm10[14],ymm4[14],ymm10[15],ymm4[15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm15[0],ymm7[1],ymm15[2,3],ymm7[4],ymm15[5,6],ymm7[7] -; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm15 = ymm14[4],ymm12[4],ymm14[5],ymm12[5],ymm14[6],ymm12[6],ymm14[7],ymm12[7],ymm14[12],ymm12[12],ymm14[13],ymm12[13],ymm14[14],ymm12[14],ymm14[15],ymm12[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[3,3,3,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm12 = ymm14[0],ymm12[0],ymm14[1],ymm12[1],ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[8],ymm12[8],ymm14[9],ymm12[9],ymm14[10],ymm12[10],ymm14[11],ymm12[11] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm10[0],ymm4[0],ymm10[1],ymm4[1],ymm10[2],ymm4[2],ymm10[3],ymm4[3],ymm10[8],ymm4[8],ymm10[9],ymm4[9],ymm10[10],ymm4[10],ymm10[11],ymm4[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm12[2,2,2,3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[1,0,2,2,5,4,6,6] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0],ymm4[1],ymm10[2,3],ymm4[4],ymm10[5,6],ymm4[7] -; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} ymm10 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm10[2],ymm4[3,4],ymm10[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm2 = 
ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm2[2,1,2,3,6,5,6,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] -; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm11[4],ymm13[4],ymm11[5],ymm13[5],ymm11[6],ymm13[6],ymm11[7],ymm13[7],ymm11[12],ymm13[12],ymm11[13],ymm13[13],ymm11[14],ymm13[14],ymm11[15],ymm13[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3] -; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-SLOW-NEXT: vmovdqa %ymm0, 160(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm4, 288(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm7, 352(%rax) +; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm1, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vmovdqa %ymm9, 96(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm6, 192(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm10, 288(%rax) ; AVX2-SLOW-NEXT: vmovdqa %ymm8, (%rax) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 128(%rax) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 64(%rax) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 192(%rax) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 320(%rax) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 224(%rax) -; AVX2-SLOW-NEXT: addq $296, %rsp # imm = 0x128 +; AVX2-SLOW-NEXT: vmovdqa %ymm15, 128(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm13, 160(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm12, 352(%rax) +; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload +; AVX2-SLOW-NEXT: vmovaps %ymm1, 320(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm2, 64(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm3, 224(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm4, 256(%rax) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; -; AVX2-FAST-ALL-LABEL: vf32: -; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: subq $392, %rsp # imm = 0x188 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %xmm15 -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rsi), %xmm14 -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11] -; AVX2-FAST-ALL-NEXT: vpshufb %xmm12, %xmm14, %xmm0 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %xmm8 -; AVX2-FAST-ALL-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rdi), %xmm9 -; AVX2-FAST-ALL-NEXT: vpshufb %xmm12, %xmm9, %xmm1 -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vmovdqa (%r9), 
%xmm10 -; AVX2-FAST-ALL-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%r9), %xmm3 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[1,2,2,3] -; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %xmm11 -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%r8), %xmm2 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[1,2,2,3] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7] -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %xmm13 -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rcx), %xmm1 -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} xmm4 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %xmm7 -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rdx), %xmm0 -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} xmm5 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] -; AVX2-FAST-ALL-NEXT: vpbroadcastq %xmm5, %ymm5 -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpshufb %xmm12, %xmm15, %xmm5 -; AVX2-FAST-ALL-NEXT: vpshufb %xmm12, %xmm8, %xmm4 -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm5 = xmm10[1,2,2,3] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm6 = xmm11[1,2,2,3] -; AVX2-FAST-ALL-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} xmm5 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} xmm6 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vmovdqa %xmm7, %xmm12 -; AVX2-FAST-ALL-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX2-FAST-ALL-NEXT: vpbroadcastq %xmm5, %ymm5 -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm5 = <1,2,1,2,u,u,3,3> -; AVX2-FAST-ALL-NEXT: vpermd %ymm4, %ymm5, %ymm4 -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,1,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7] -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = 
ymm5[1,1,1,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rdi), %ymm5 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rsi), %ymm4 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm10 = -; AVX2-FAST-ALL-NEXT: vpshufb %ymm10, %ymm4, %ymm4 -; AVX2-FAST-ALL-NEXT: vpshufb %ymm10, %ymm5, %ymm5 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[8],ymm4[8],ymm5[9],ymm4[9],ymm5[10],ymm4[10],ymm5[11],ymm4[11] -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%r8), %ymm6 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%r9), %ymm5 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[1,2,2,3,5,6,6,7] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm7 = ymm6[1,2,2,3,5,6,6,7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[8],ymm5[8],ymm7[9],ymm5[9],ymm7[10],ymm5[10],ymm7[11],ymm5[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rdx), %ymm6 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa 32(%rcx), %ymm8 -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm5 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm6 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3] -; AVX2-FAST-ALL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2] -; AVX2-FAST-ALL-NEXT: # ymm4 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm4, %ymm0 -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; AVX2-FAST-ALL-NEXT: vpbroadcastq %xmm1, %ymm1 -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7] -; AVX2-FAST-ALL-NEXT: vmovdqa 
{{.*#+}} ymm1 = <1,2,1,2,u,u,3,3> -; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm1, %ymm0 -; AVX2-FAST-ALL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-FAST-ALL-NEXT: vmovdqa %xmm15, %xmm7 -; AVX2-FAST-ALL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdi), %ymm1 -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa (%rsi), %ymm15 -; AVX2-FAST-ALL-NEXT: vpshufb %ymm10, %ymm15, %ymm0 -; AVX2-FAST-ALL-NEXT: vpshufb %ymm10, %ymm1, %ymm1 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11] -; AVX2-FAST-ALL-NEXT: vmovdqa (%r8), %ymm13 -; AVX2-FAST-ALL-NEXT: vmovdqa (%r9), %ymm12 -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm3 = ymm12[1,2,2,3,5,6,6,7] -; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} ymm9 = ymm13[1,2,2,3,5,6,6,7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm9[0],ymm3[0],ymm9[1],ymm3[1],ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[8],ymm3[8],ymm9[9],ymm3[9],ymm9[10],ymm3[10],ymm9[11],ymm3[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm9 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7] -; AVX2-FAST-ALL-NEXT: vmovdqa (%rdx), %ymm11 -; AVX2-FAST-ALL-NEXT: vmovdqa (%rcx), %ymm6 -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm10 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpsrldq {{.*#+}} ymm14 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm10 = ymm14[0],ymm10[0],ymm14[1],ymm10[1],ymm14[2],ymm10[2],ymm14[3],ymm10[3],ymm14[8],ymm10[8],ymm14[9],ymm10[9],ymm14[10],ymm10[10],ymm14[11],ymm10[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload -; AVX2-FAST-ALL-NEXT: # xmm9 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] -; AVX2-FAST-ALL-NEXT: vpermd %ymm9, %ymm4, %ymm4 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0],ymm4[1],ymm9[2,3],ymm4[4],ymm9[5,6],ymm4[7] -; AVX2-FAST-ALL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; 
AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] -; AVX2-FAST-ALL-NEXT: vpbroadcastq %xmm7, %ymm7 -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm7[2],ymm4[3,4],ymm7[5],ymm4[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm11[0],ymm6[0],ymm11[1],ymm6[1],ymm11[2],ymm6[2],ymm11[3],ymm6[3],ymm11[8],ymm6[8],ymm11[9],ymm6[9],ymm11[10],ymm6[10],ymm11[11],ymm6[11] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm9 = [5,4,2,2,5,4,6,6] -; AVX2-FAST-ALL-NEXT: vpermd %ymm7, %ymm9, %ymm7 -; AVX2-FAST-ALL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm10 = ymm4[0],ymm15[0],ymm4[1],ymm15[1],ymm4[2],ymm15[2],ymm4[3],ymm15[3],ymm4[8],ymm15[8],ymm4[9],ymm15[9],ymm4[10],ymm15[10],ymm4[11],ymm15[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6],ymm7[7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm10 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[8],ymm12[8],ymm13[9],ymm12[9],ymm13[10],ymm12[10],ymm13[11],ymm12[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7] -; AVX2-FAST-ALL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm10 = ymm0[4],ymm8[4],ymm0[5],ymm8[5],ymm0[6],ymm8[6],ymm0[7],ymm8[7],ymm0[12],ymm8[12],ymm0[13],ymm8[13],ymm0[14],ymm8[14],ymm0[15],ymm8[15] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm1 = [5,6,5,6,5,6,7,7] -; AVX2-FAST-ALL-NEXT: vpermd %ymm10, %ymm1, %ymm10 -; AVX2-FAST-ALL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm14 = ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0],ymm14[1],ymm10[2,3],ymm14[4],ymm10[5,6],ymm14[7] -; AVX2-FAST-ALL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm14 = ymm5[4],ymm2[4],ymm5[5],ymm2[5],ymm5[6],ymm2[6],ymm5[7],ymm2[7],ymm5[12],ymm2[12],ymm5[13],ymm2[13],ymm5[14],ymm2[14],ymm5[15],ymm2[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm14 = ymm14[3,3,3,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm14[2],ymm10[3,4],ymm14[5],ymm10[6,7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm8 = ymm0[0],ymm8[0],ymm0[1],ymm8[1],ymm0[2],ymm8[2],ymm0[3],ymm8[3],ymm0[8],ymm8[8],ymm0[9],ymm8[9],ymm0[10],ymm8[10],ymm0[11],ymm8[11] -; AVX2-FAST-ALL-NEXT: vpermd %ymm8, %ymm9, %ymm8 -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm9 = ymm5[0],ymm2[0],ymm5[1],ymm2[1],ymm5[2],ymm2[2],ymm5[3],ymm2[3],ymm5[8],ymm2[8],ymm5[9],ymm2[9],ymm5[10],ymm2[10],ymm5[11],ymm2[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7] -; AVX2-FAST-ALL-NEXT: vpunpcklwd {{.*#+}} ymm9 = 
ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7] -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm11[4],ymm6[4],ymm11[5],ymm6[5],ymm11[6],ymm6[6],ymm11[7],ymm6[7],ymm11[12],ymm6[12],ymm11[13],ymm6[13],ymm11[14],ymm6[14],ymm11[15],ymm6[15] -; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm1 = [5,6,5,6,5,6,7,7] -; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm1, %ymm0 -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm13[4],ymm12[4],ymm13[5],ymm12[5],ymm13[6],ymm12[6],ymm13[7],ymm12[7],ymm13[12],ymm12[12],ymm13[13],ymm12[13],ymm13[14],ymm12[14],ymm13[15],ymm12[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm4[4],ymm15[4],ymm4[5],ymm15[5],ymm4[6],ymm15[6],ymm4[7],ymm15[7],ymm4[12],ymm15[12],ymm4[13],ymm15[13],ymm4[14],ymm15[14],ymm4[15],ymm15[15] -; AVX2-FAST-ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3] -; AVX2-FAST-ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-ALL-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm0, 160(%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm8, 288(%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm10, 352(%rax) -; AVX2-FAST-ALL-NEXT: vmovdqa %ymm7, 96(%rax) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, (%rax) -; AVX2-FAST-ALL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 128(%rax) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 64(%rax) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 192(%rax) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 320(%rax) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 256(%rax) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 32(%rax) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 224(%rax) -; AVX2-FAST-ALL-NEXT: addq $392, %rsp # imm = 0x188 -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: vf32: -; AVX2-FAST-PERLANE: # %bb.0: -; AVX2-FAST-PERLANE-NEXT: subq $168, %rsp -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm9 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm10 -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rcx), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm12 -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %xmm13 -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11] -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm8, %xmm13, %xmm0 -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm5 -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %xmm4 -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm8, %xmm4, %xmm1 -; AVX2-FAST-PERLANE-NEXT: 
vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm11 = ymm0[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r9), %xmm7 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm7[1,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %xmm0 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm11[1],ymm1[2,3],ymm11[4],ymm1[5,6],ymm11[7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %xmm2 -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} xmm1 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %xmm6 -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} xmm3 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm1, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm11[0,1],ymm1[2],ymm11[3,4],ymm1[5],ymm11[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,2,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[1,1,1,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r9), %xmm11 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm13 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm4 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm6 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3] -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm0, %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm8, %xmm12, %xmm0 -; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm8, %xmm5, %xmm1 -; AVX2-FAST-PERLANE-NEXT: 
vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm11[1,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm3 = xmm13[1,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7] -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} xmm1 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} xmm3 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm1, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,2,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %ymm14 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm15 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r9), %ymm5 -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r9), %ymm12 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3] -; AVX2-FAST-PERLANE-NEXT: vpbroadcastq %xmm1, %ymm1 -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm9[0],ymm0[1],ymm9[1],ymm0[2],ymm9[2],ymm0[3],ymm9[3],ymm0[8],ymm9[8],ymm0[9],ymm9[9],ymm0[10],ymm9[10],ymm0[11],ymm9[11] -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm9, %ymm13 -; 
AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rsp), %ymm7 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm10[0],ymm7[0],ymm10[1],ymm7[1],ymm10[2],ymm7[2],ymm10[3],ymm7[3],ymm10[8],ymm7[8],ymm10[9],ymm7[9],ymm10[10],ymm7[10],ymm10[11],ymm7[11] -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm10, %ymm9 -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm15[0],ymm12[0],ymm15[1],ymm12[1],ymm15[2],ymm12[2],ymm15[3],ymm12[3],ymm15[8],ymm12[8],ymm15[9],ymm12[9],ymm15[10],ymm12[10],ymm15[11],ymm12[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm14[4],ymm5[4],ymm14[5],ymm5[5],ymm14[6],ymm5[6],ymm14[7],ymm5[7],ymm14[12],ymm5[12],ymm14[13],ymm5[13],ymm14[14],ymm5[14],ymm14[15],ymm5[15] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,1,2,3,6,5,6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %ymm4 -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %ymm3 -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1 -; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[3,3,3,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm0, %ymm6 -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm1, %ymm8 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[8],ymm6[8],ymm8[9],ymm6[9],ymm8[10],ymm6[10],ymm8[11],ymm6[11] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm8 = ymm5[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm10 = ymm14[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[8],ymm8[8],ymm10[9],ymm8[9],ymm10[10],ymm8[10],ymm10[11],ymm8[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0],ymm6[1],ymm8[2,3],ymm6[4],ymm8[5,6],ymm6[7] -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm8 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm10 = 
ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[8],ymm8[8],ymm10[9],ymm8[9],ymm10[10],ymm8[10],ymm10[11],ymm8[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2],ymm6[3,4],ymm8[5],ymm6[6,7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm14[0],ymm5[0],ymm14[1],ymm5[1],ymm14[2],ymm5[2],ymm14[3],ymm5[3],ymm14[8],ymm5[8],ymm14[9],ymm5[9],ymm14[10],ymm5[10],ymm14[11],ymm5[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm15[4],ymm12[4],ymm15[5],ymm12[5],ymm15[6],ymm12[6],ymm15[7],ymm12[7],ymm15[12],ymm12[12],ymm15[13],ymm12[13],ymm15[14],ymm12[14],ymm15[15],ymm12[15] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,2,3,6,5,6,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm9[4],ymm7[4],ymm9[5],ymm7[5],ymm9[6],ymm7[6],ymm9[7],ymm7[7],ymm9[12],ymm7[12],ymm9[13],ymm7[13],ymm9[14],ymm7[14],ymm9[15],ymm7[15] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm13[4],ymm4[5],ymm13[5],ymm4[6],ymm13[6],ymm4[7],ymm13[7],ymm4[12],ymm13[12],ymm4[13],ymm13[13],ymm4[14],ymm13[14],ymm4[15],ymm13[15] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7] -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm13, %ymm3 -; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm4, %ymm2 -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm3 = ymm12[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm4 = ymm15[1,2,2,3,5,6,6,7] -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = 
ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7] -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm3 = ymm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm7[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} ymm4 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero -; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11] -; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2] -; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 128(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 160(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 288(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, 320(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm11, 352(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 96(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rax) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 64(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 192(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 224(%rax) -; AVX2-FAST-PERLANE-NEXT: addq $168, %rsp -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-FAST-LABEL: vf32: +; AVX2-FAST: # %bb.0: +; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm0 +; AVX2-FAST-NEXT: vmovdqa 32(%r8), %ymm3 +; AVX2-FAST-NEXT: vmovdqa (%r9), %ymm2 +; AVX2-FAST-NEXT: vmovdqa 32(%r9), %ymm4 +; AVX2-FAST-NEXT: vmovdqa (%r9), %xmm1 +; AVX2-FAST-NEXT: vmovdqa 32(%r9), %xmm5 +; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[0,0,2,1,4,5,6,7] +; AVX2-FAST-NEXT: vpbroadcastq %xmm6, %ymm6 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm0, %ymm6, %ymm9 +; AVX2-FAST-NEXT: vpshuflw {{.*#+}} ymm6 = ymm4[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2] +; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm3, %ymm6, %ymm10 +; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[0,0,2,1,4,5,6,7] +; AVX2-FAST-NEXT: vpbroadcastq %xmm6, %ymm6 +; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm0, %ymm6, %ymm11 +; AVX2-FAST-NEXT: vpshuflw {{.*#+}} ymm8 = ymm2[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,2,2] +; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm3, %ymm8, %ymm8 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15] +; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm5, %xmm7 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} 
ymm12 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm0, %ymm7, %ymm13 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm7 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9] +; AVX2-FAST-NEXT: vpshufb %xmm7, %xmm5, %xmm5 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1] +; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm1, %xmm6 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm0, %ymm6, %ymm6 +; AVX2-FAST-NEXT: vpshufb %xmm7, %xmm1, %xmm1 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = +; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm4, %ymm14 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,1,2,3] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm3, %ymm14, %ymm14 +; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm2, %ymm7 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm3, %ymm7, %ymm7 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm3, %ymm5, %ymm5 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm3, %ymm1, %ymm1 +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = +; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm4, %ymm4 +; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm4[2,2,2,3] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm0, %ymm3, %ymm3 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] +; AVX2-FAST-NEXT: vpblendvb %ymm12, %ymm0, %ymm2, %ymm0 +; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-FAST-NEXT: vmovdqa %ymm8, 96(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm0, 128(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm7, 160(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm11, 192(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm10, 288(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm14, 352(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm3, 320(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm9, (%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm6, 64(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm5, 224(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm13, 256(%rax) +; AVX2-FAST-NEXT: vzeroupper +; AVX2-FAST-NEXT: retq ; ; AVX512-LABEL: vf32: ; AVX512: # %bb.0: ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm3 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm4 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm1 -; AVX512-NEXT: vmovdqu64 (%rcx), %zmm2 -; AVX512-NEXT: vmovdqu64 (%r8), %zmm5 -; AVX512-NEXT: vmovdqu64 (%r9), %zmm6 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,32,3,35,0,0,1,33,4,36,0,0,2,34,0,0,0,32,3,35,0,0,1,33,4,36,0,0,2,34] -; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm7 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,32,3,35,0,0,1,33,4,36,0,0,2,34,5,37,0,32,3,35,0,0,1,33,4,36,0,0,2,34,5,37] -; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm4, %zmm3, %zmm0 -; AVX512-NEXT: movw $9362, %cx # imm = 0x2492 -; AVX512-NEXT: kmovd %ecx, %k1 -; AVX512-NEXT: vmovdqa32 %zmm7, %zmm0 {%k1} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [2,34,0,0,0,32,3,35,0,0,1,33,4,36,0,0,2,34,0,0,0,32,3,35,0,0,1,33,4,36,0,0] -; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm6, %zmm5, %zmm7 -; AVX512-NEXT: movw $18724, %cx # imm = 0x4924 -; AVX512-NEXT: kmovd %ecx, %k2 -; AVX512-NEXT: vmovdqa32 %zmm7, %zmm0 {%k2} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = 
[0,0,5,37,8,40,0,0,6,38,9,41,0,0,7,39,0,0,5,37,8,40,0,0,6,38,9,41,0,0,7,39] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm6, %zmm5, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [5,37,8,40,0,0,6,38,9,41,0,0,7,39,10,42,5,37,8,40,0,0,6,38,9,41,0,0,7,39,10,42] -; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm7 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm7 {%k1} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [8,40,0,0,6,38,9,41,0,0,7,39,10,42,0,0,8,40,0,0,6,38,9,41,0,0,7,39,10,42,0,0] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm4, %zmm3, %zmm8 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm7 {%k2} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm4, %zmm3, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [10,42,13,45,0,0,11,43,14,46,0,0,12,44,15,47,10,42,13,45,0,0,11,43,14,46,0,0,12,44,15,47] -; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm6, %zmm5, %zmm9 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm9 {%k1} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0,13,45,0,0,11,43,14,46,0,0,12,44,15,47,0,0] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm8 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm9 {%k2} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,16,48,19,51,0,0,17,49,20,52,0,0,18,50,0,0,16,48,19,51,0,0,17,49,20,52,0,0,18,50] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [16,48,19,51,0,0,17,49,20,52,0,0,18,50,21,53,16,48,19,51,0,0,17,49,20,52,0,0,18,50,21,53] -; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm4, %zmm3, %zmm10 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm10 {%k1} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [18,50,0,0,16,48,19,51,0,0,17,49,20,52,0,0,18,50,0,0,16,48,19,51,0,0,17,49,20,52,0,0] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm6, %zmm5, %zmm8 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm10 {%k2} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,21,53,24,56,0,0,22,54,25,57,0,0,23,55,0,0,21,53,24,56,0,0,22,54,25,57,0,0,23,55] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm6, %zmm5, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [21,53,24,56,0,0,22,54,25,57,0,0,23,55,26,58,21,53,24,56,0,0,22,54,25,57,0,0,23,55,26,58] -; AVX512-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm11 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm11 {%k1} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [24,56,0,0,22,54,25,57,0,0,23,55,26,58,0,0,24,56,0,0,22,54,25,57,0,0,23,55,26,58,0,0] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm4, %zmm3, %zmm8 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm11 {%k2} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm4, %zmm3, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [26,58,29,61,0,0,27,59,30,62,0,0,28,60,31,63,26,58,29,61,0,0,27,59,30,62,0,0,28,60,31,63] -; AVX512-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm6, %zmm5, %zmm3 -; AVX512-NEXT: vmovdqa32 %zmm8, %zmm3 {%k1} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 
= [29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0,29,61,0,0,27,59,30,62,0,0,28,60,31,63,0,0] -; AVX512-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2w %zmm2, %zmm1, %zmm4 -; AVX512-NEXT: vmovdqa32 %zmm4, %zmm3 {%k2} -; AVX512-NEXT: vmovdqu64 %zmm3, 320(%rax) -; AVX512-NEXT: vmovdqu64 %zmm11, 256(%rax) -; AVX512-NEXT: vmovdqu64 %zmm10, 192(%rax) -; AVX512-NEXT: vmovdqu64 %zmm9, 128(%rax) -; AVX512-NEXT: vmovdqu64 %zmm7, 64(%rax) -; AVX512-NEXT: vmovdqu64 %zmm0, (%rax) +; AVX512-NEXT: vmovdqu64 (%r8), %zmm0 +; AVX512-NEXT: vmovdqu64 (%r9), %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,32,6,7,8,9,10,33,12,13,14,15,16,34,18,19,20,21,22,35,24,25,26,27,28,36,30,31] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,37,4,5,6,7,8,38,10,11,12,13,14,39,16,17,18,19,20,40,22,23,24,25,26,41,28,29,30,31] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,42,2,3,4,5,6,43,8,9,10,11,12,44,14,15,16,17,18,45,20,21,22,23,24,46,26,27,28,29,30,47] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,48,6,7,8,9,10,49,12,13,14,15,16,50,18,19,20,21,22,51,24,25,26,27,28,52,30,31] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,53,4,5,6,7,8,54,10,11,12,13,14,55,16,17,18,19,20,56,22,23,24,25,26,57,28,29,30,31] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm6 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,58,2,3,4,5,6,59,8,9,10,11,12,60,14,15,16,17,18,61,20,21,22,23,24,62,26,27,28,29,30,63] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm7 +; AVX512-NEXT: vmovdqu64 %zmm7, 320(%rax) +; AVX512-NEXT: vmovdqu64 %zmm6, 256(%rax) +; AVX512-NEXT: vmovdqu64 %zmm5, 192(%rax) +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rax) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rax) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <32 x i16>, <32 x i16>* %in.vecptr0, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll @@ -87,25 +87,19 @@ define void @store_i32_stride3_vf4(<4 x i32>* %in.vecptr0, <4 x i32>* %in.vecptr1, <4 x i32>* %in.vecptr2, <12 x i32>* %out.vec) nounwind { ; SSE-LABEL: store_i32_stride3_vf4: ; SSE: # %bb.0: -; SSE-NEXT: movaps (%rdi), %xmm0 -; SSE-NEXT: movaps (%rsi), %xmm1 -; SSE-NEXT: movaps (%rdx), %xmm2 -; SSE-NEXT: movaps %xmm2, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[1,3] -; SSE-NEXT: movaps %xmm0, %xmm4 -; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2] -; SSE-NEXT: movaps %xmm1, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm2[3,3] -; SSE-NEXT: movaps %xmm2, %xmm5 -; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm0[2],xmm5[3],xmm0[3] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,2] -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, 16(%rcx) -; SSE-NEXT: movaps %xmm5, 32(%rcx) -; SSE-NEXT: movaps %xmm4, (%rcx) +; SSE-NEXT: movaps (%rsi), %xmm0 +; SSE-NEXT: movaps (%rdx), %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm2 +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[3,0] +; SSE-NEXT: movaps %xmm0, %xmm3 +; SSE-NEXT: shufps 
{{.*#+}} xmm3 = xmm3[1,2],xmm1[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,2] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0,1,3] +; SSE-NEXT: movaps %xmm1, 16(%rcx) +; SSE-NEXT: movaps %xmm3, 32(%rcx) +; SSE-NEXT: movaps %xmm0, (%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i32_stride3_vf4: @@ -207,161 +201,66 @@ define void @store_i32_stride3_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr1, <8 x i32>* %in.vecptr2, <24 x i32>* %out.vec) nounwind { ; SSE-LABEL: store_i32_stride3_vf8: ; SSE: # %bb.0: -; SSE-NEXT: movaps (%rdi), %xmm1 -; SSE-NEXT: movaps 16(%rdi), %xmm0 -; SSE-NEXT: movaps (%rsi), %xmm8 -; SSE-NEXT: movaps 16(%rsi), %xmm5 -; SSE-NEXT: movaps (%rdx), %xmm4 -; SSE-NEXT: movaps 16(%rdx), %xmm6 -; SSE-NEXT: movaps %xmm5, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3] -; SSE-NEXT: movaps %xmm6, %xmm2 -; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm7[0,2] -; SSE-NEXT: movaps %xmm0, %xmm7 -; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm5[1] -; SSE-NEXT: movaps %xmm6, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm5[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm7[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[1,3] -; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0,2] -; SSE-NEXT: movaps %xmm8, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm4[3,3] -; SSE-NEXT: movaps %xmm4, %xmm6 -; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm5[0,2] +; SSE-NEXT: movaps (%rsi), %xmm0 +; SSE-NEXT: movaps 16(%rsi), %xmm1 +; SSE-NEXT: movaps (%rdx), %xmm2 +; SSE-NEXT: movaps 16(%rdx), %xmm3 +; SSE-NEXT: movaps %xmm1, %xmm4 ; SSE-NEXT: movaps %xmm1, %xmm5 -; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm8[1] -; SSE-NEXT: movaps %xmm4, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm8[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm1[1,3] -; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0,2] -; SSE-NEXT: movaps %xmm1, (%rcx) -; SSE-NEXT: movaps %xmm7, 16(%rcx) -; SSE-NEXT: movaps %xmm6, 32(%rcx) -; SSE-NEXT: movaps %xmm0, 48(%rcx) +; SSE-NEXT: movaps %xmm2, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm1[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm1[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm3[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0,1,3] +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,0],xmm3[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,0] +; SSE-NEXT: movaps %xmm0, %xmm4 +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,2],xmm2[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0,1,3] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[3,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,2] +; SSE-NEXT: movaps %xmm0, (%rcx) +; SSE-NEXT: movaps %xmm6, 16(%rcx) +; SSE-NEXT: movaps %xmm4, 32(%rcx) +; SSE-NEXT: movaps %xmm5, 48(%rcx) ; SSE-NEXT: movaps %xmm3, 64(%rcx) -; SSE-NEXT: movaps %xmm2, 80(%rcx) +; SSE-NEXT: movaps %xmm1, 80(%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i32_stride3_vf8: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps 
(%rsi), %xmm0 -; AVX1-NEXT: vmovaps (%rdi), %xmm1 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1] -; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vbroadcastsd (%rdx), %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX1-NEXT: vmovaps 16(%rdx), %xmm1 -; AVX1-NEXT: vmovaps 16(%rsi), %xmm2 -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm2[3,0],xmm1[3,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm1[2,1],xmm3[0,2] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[1,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vbroadcastsd 24(%rdi), %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2] -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7] -; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX1-NEXT: vmovaps %ymm2, 32(%rcx) -; AVX1-NEXT: vmovaps %ymm1, 64(%rcx) +; AVX1-NEXT: vmovapd (%rsi), %ymm0 +; AVX1-NEXT: vmovapd (%rdx), %ymm1 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[0,0,3,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7] +; AVX1-NEXT: vbroadcastsd (%rdx), %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7] ; AVX1-NEXT: vmovaps %ymm0, (%rcx) +; AVX1-NEXT: vmovaps %ymm1, 32(%rcx) +; AVX1-NEXT: vmovaps %ymm2, 64(%rcx) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; -; AVX2-SLOW-LABEL: store_i32_stride3_vf8: -; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm1 -; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm2 -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-SLOW-NEXT: vbroadcastsd (%rdx), %ymm4 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-SLOW-NEXT: vbroadcastsd 24(%rdi), %ymm5 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm4, 64(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm3, 
(%rcx) -; AVX2-SLOW-NEXT: vzeroupper -; AVX2-SLOW-NEXT: retq -; -; AVX2-FAST-ALL-LABEL: store_i32_stride3_vf8: -; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-FAST-ALL-NEXT: vmovaps (%rsi), %ymm1 -; AVX2-FAST-ALL-NEXT: vmovaps (%rdx), %ymm2 -; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [1,0,2,2,1,0,2,2] -; AVX2-FAST-ALL-NEXT: # ymm3 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm3, %ymm3 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd (%rdx), %ymm4 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2,3],ymm0[4],ymm4[5,6],ymm0[7] -; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm4 = ymm1[0,0,3,3,4,4,7,7] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7] -; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm4 = <5,u,u,6,u,u,7,u> -; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm4, %ymm1 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 24(%rdi), %ymm2 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7] -; AVX2-FAST-ALL-NEXT: vmovaps %ymm1, 64(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm3, (%rcx) -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf8: -; AVX2-FAST-PERLANE: # %bb.0: -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm1 -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm2 -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rdx), %ymm4 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rdi), %ymm5 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 64(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, (%rcx) -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-LABEL: 
store_i32_stride3_vf8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rsi), %ymm0 +; AVX2-NEXT: vmovaps (%rdx), %ymm1 +; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm1[1,1,2,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7] +; AVX2-NEXT: vbroadcastsd (%rdx), %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7] +; AVX2-NEXT: vmovaps %ymm0, (%rcx) +; AVX2-NEXT: vmovaps %ymm1, 64(%rcx) +; AVX2-NEXT: vmovaps %ymm2, 32(%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq ; ; AVX512-LABEL: store_i32_stride3_vf8: ; AVX512: # %bb.0: @@ -393,313 +292,130 @@ define void @store_i32_stride3_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vecptr1, <16 x i32>* %in.vecptr2, <48 x i32>* %out.vec) nounwind { ; SSE-LABEL: store_i32_stride3_vf16: ; SSE: # %bb.0: -; SSE-NEXT: movaps (%rdi), %xmm3 -; SSE-NEXT: movaps 16(%rdi), %xmm2 -; SSE-NEXT: movaps 32(%rdi), %xmm1 -; SSE-NEXT: movaps 48(%rdi), %xmm15 -; SSE-NEXT: movaps (%rsi), %xmm8 -; SSE-NEXT: movaps 16(%rsi), %xmm10 -; SSE-NEXT: movaps 32(%rsi), %xmm14 -; SSE-NEXT: movaps 48(%rsi), %xmm0 -; SSE-NEXT: movaps (%rdx), %xmm12 -; SSE-NEXT: movaps 16(%rdx), %xmm4 -; SSE-NEXT: movaps 32(%rdx), %xmm5 -; SSE-NEXT: movaps 48(%rdx), %xmm6 -; SSE-NEXT: movaps %xmm0, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3] -; SSE-NEXT: movaps %xmm6, %xmm9 -; SSE-NEXT: unpckhps {{.*#+}} xmm9 = xmm9[2],xmm15[2],xmm9[3],xmm15[3] -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,3],xmm7[0,2] -; SSE-NEXT: movaps %xmm15, %xmm7 -; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm0[1] -; SSE-NEXT: movaps %xmm6, %xmm11 -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm0[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm7[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm15[1,3] -; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1] -; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm6[0,2] -; SSE-NEXT: movaps %xmm14, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm5[3,3] -; SSE-NEXT: movaps %xmm5, %xmm13 -; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm1[2],xmm13[3],xmm1[3] -; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,3],xmm0[0,2] -; SSE-NEXT: movaps %xmm1, %xmm0 -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm14[1] -; SSE-NEXT: movaps %xmm5, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm14[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm0[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[1,3] -; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0,2] -; SSE-NEXT: movaps %xmm10, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm4[3,3] -; SSE-NEXT: movaps %xmm4, %xmm14 -; SSE-NEXT: unpckhps {{.*#+}} xmm14 = xmm14[2],xmm2[2],xmm14[3],xmm2[3] -; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, %xmm0 -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1] -; SSE-NEXT: movaps %xmm4, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm10[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm2[1,3] -; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0,2] -; SSE-NEXT: movaps %xmm8, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm12[3,3] -; SSE-NEXT: movaps %xmm12, %xmm4 -; SSE-NEXT: 
unpckhps {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,2] -; SSE-NEXT: movaps %xmm3, %xmm0 -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1] -; SSE-NEXT: movaps %xmm12, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm8[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm0[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,1],xmm3[1,3] -; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1] -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm12[0,2] -; SSE-NEXT: movaps %xmm3, (%rcx) -; SSE-NEXT: movaps %xmm6, 16(%rcx) -; SSE-NEXT: movaps %xmm4, 32(%rcx) -; SSE-NEXT: movaps %xmm2, 48(%rcx) -; SSE-NEXT: movaps %xmm5, 64(%rcx) -; SSE-NEXT: movaps %xmm14, 80(%rcx) -; SSE-NEXT: movaps %xmm1, 96(%rcx) -; SSE-NEXT: movaps %xmm7, 112(%rcx) -; SSE-NEXT: movaps %xmm13, 128(%rcx) -; SSE-NEXT: movaps %xmm15, 144(%rcx) +; SSE-NEXT: movaps (%rsi), %xmm13 +; SSE-NEXT: movaps 16(%rsi), %xmm4 +; SSE-NEXT: movaps 32(%rsi), %xmm0 +; SSE-NEXT: movaps 48(%rsi), %xmm14 +; SSE-NEXT: movaps (%rdx), %xmm10 +; SSE-NEXT: movaps 16(%rdx), %xmm12 +; SSE-NEXT: movaps 32(%rdx), %xmm8 +; SSE-NEXT: movaps 48(%rdx), %xmm5 +; SSE-NEXT: movaps %xmm14, %xmm9 +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,2],xmm5[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0,1,3] +; SSE-NEXT: movaps %xmm5, %xmm11 +; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm0[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm4[3,0] +; SSE-NEXT: movaps %xmm4, %xmm7 +; SSE-NEXT: movaps %xmm10, %xmm3 +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm4[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0,2] +; SSE-NEXT: movaps %xmm13, %xmm5 +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,2],xmm8[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0,1,3] +; SSE-NEXT: movaps %xmm14, %xmm2 +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm8[1,0] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm14[2,3] +; SSE-NEXT: movaps %xmm0, %xmm1 +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm8[0,0] +; SSE-NEXT: movaps %xmm0, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm1[2,0] +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,2],xmm12[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0,1,3] +; SSE-NEXT: movaps %xmm12, %xmm1 +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm13[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm13[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,0],xmm14[3,0] +; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm12[0,2] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm10[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3] +; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,0],xmm13[3,0] +; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm10[0,2] +; SSE-NEXT: movaps %xmm13, (%rcx) +; SSE-NEXT: movaps %xmm3, 16(%rcx) +; SSE-NEXT: movaps %xmm0, 32(%rcx) +; SSE-NEXT: movaps %xmm14, 48(%rcx) +; SSE-NEXT: movaps %xmm1, 64(%rcx) +; SSE-NEXT: movaps %xmm7, 80(%rcx) +; SSE-NEXT: movaps %xmm6, 96(%rcx) +; SSE-NEXT: movaps %xmm2, 112(%rcx) +; SSE-NEXT: movaps %xmm5, 128(%rcx) +; SSE-NEXT: movaps %xmm4, 144(%rcx) ; SSE-NEXT: movaps %xmm11, 160(%rcx) ; SSE-NEXT: movaps %xmm9, 176(%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i32_stride3_vf16: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps (%rsi), %xmm0 -; AVX1-NEXT: vmovaps (%rdi), %xmm1 -; AVX1-NEXT: vmovaps 32(%rdi), %xmm2 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm1[1],xmm0[1] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = 
xmm0[1,1],xmm3[0,2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 -; AVX1-NEXT: vbroadcastsd (%rdx), %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX1-NEXT: vmovaps 48(%rdx), %xmm1 -; AVX1-NEXT: vmovaps 48(%rsi), %xmm3 -; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm3[3,0],xmm1[3,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm1[2,1],xmm4[0,2] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm3[1,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1 -; AVX1-NEXT: vbroadcastsd 56(%rdi), %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7] -; AVX1-NEXT: vmovaps 32(%rsi), %xmm3 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm2[1],xmm3[1] -; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm3[1,1],xmm4[0,2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0] -; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm3[2,0],xmm2[2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2 -; AVX1-NEXT: vbroadcastsd 32(%rdx), %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX1-NEXT: vmovaps 16(%rdx), %xmm3 -; AVX1-NEXT: vmovaps 16(%rsi), %xmm4 -; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,0],xmm3[3,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm3[2,1],xmm5[0,2] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,0],xmm4[1,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3 -; AVX1-NEXT: vbroadcastsd 24(%rdi), %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm5 = mem[1,1,2,2] -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7] -; AVX1-NEXT: vpermilps {{.*#+}} ymm5 = mem[0,0,3,3,4,4,7,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm5 = mem[1,0,2,2] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm6 = mem[1,1,2,2] -; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6],ymm5[7] -; AVX1-NEXT: vpermilps {{.*#+}} ymm6 = mem[0,0,3,3,4,4,7,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7] -; AVX1-NEXT: vmovaps %ymm5, 32(%rcx) -; AVX1-NEXT: vmovaps %ymm4, 128(%rcx) -; AVX1-NEXT: vmovaps %ymm3, 64(%rcx) -; AVX1-NEXT: vmovaps %ymm2, 96(%rcx) -; AVX1-NEXT: vmovaps %ymm1, 160(%rcx) +; AVX1-NEXT: vmovapd (%rsi), %ymm0 +; AVX1-NEXT: vmovaps 32(%rsi), %ymm1 +; AVX1-NEXT: vmovapd (%rdx), %ymm2 +; AVX1-NEXT: vmovapd 32(%rdx), %ymm3 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm3[2,3,2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = ymm4[0,0,3,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0],ymm4[1],ymm1[2,3],ymm4[4],ymm1[5,6],ymm4[7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm2[2,3,2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm5 = ymm5[0,0,3,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm3 = ymm3[1,1,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm0[1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[1,1,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7] +; AVX1-NEXT: vbroadcastsd (%rdx), %ymm6 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = 
ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7] +; AVX1-NEXT: vbroadcastsd 32(%rdx), %ymm6 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7] +; AVX1-NEXT: vmovaps %ymm1, 96(%rcx) ; AVX1-NEXT: vmovaps %ymm0, (%rcx) +; AVX1-NEXT: vmovaps %ymm2, 32(%rcx) +; AVX1-NEXT: vmovaps %ymm3, 128(%rcx) +; AVX1-NEXT: vmovaps %ymm5, 64(%rcx) +; AVX1-NEXT: vmovaps %ymm4, 160(%rcx) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; -; AVX2-SLOW-LABEL: store_i32_stride3_vf16: -; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm1 -; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm4 -; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm0 -; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm5 -; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm3 -; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm6 -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = mem[1,0,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,1] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm1[0,0,2,1] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6],ymm2[7] -; AVX2-SLOW-NEXT: vbroadcastsd (%rdx), %ymm7 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm6[2,1,3,3] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm8 = ymm5[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,2,2,3] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7] -; AVX2-SLOW-NEXT: vbroadcastsd 56(%rdi), %ymm8 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm8 = mem[1,0,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,1] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm4[0,0,2,1] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7] -; AVX2-SLOW-NEXT: vbroadcastsd 32(%rdx), %ymm9 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm9 = ymm3[2,1,3,3] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm10 = ymm0[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,3] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6],ymm9[7] -; AVX2-SLOW-NEXT: vbroadcastsd 24(%rdi), %ymm10 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,3,3,4,4,7,7] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] -; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm4, 128(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm9, 64(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm8, 96(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm7, 160(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rcx) -; AVX2-SLOW-NEXT: vzeroupper -; AVX2-SLOW-NEXT: retq -; -; AVX2-FAST-ALL-LABEL: store_i32_stride3_vf16: -; AVX2-FAST-ALL: # %bb.0: -; 
AVX2-FAST-ALL-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-FAST-ALL-NEXT: vmovaps 32(%rdi), %ymm1 -; AVX2-FAST-ALL-NEXT: vmovaps (%rsi), %ymm2 -; AVX2-FAST-ALL-NEXT: vmovaps 32(%rsi), %ymm3 -; AVX2-FAST-ALL-NEXT: vmovaps (%rdx), %ymm4 -; AVX2-FAST-ALL-NEXT: vmovaps 32(%rdx), %ymm5 -; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm6 = [1,0,2,2,1,0,2,2] -; AVX2-FAST-ALL-NEXT: # ymm6 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermps %ymm2, %ymm6, %ymm7 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm8 = ymm0[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd (%rdx), %ymm8 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7] -; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm8 = [5,6,5,6,5,6,7,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm3, %ymm8, %ymm9 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm10 = ymm5[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 56(%rdi), %ymm10 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm10 = ymm1[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm10[1],ymm5[2,3],ymm10[4],ymm5[5,6],ymm10[7] -; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm10 = ymm3[0,0,3,3,4,4,7,7] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2],ymm5[3,4],ymm10[5],ymm5[6,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm3, %ymm6, %ymm3 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 32(%rdx), %ymm3 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm4[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7] -; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm3 = ymm2[0,0,3,3,4,4,7,7] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm2, %ymm8, %ymm2 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm4[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 24(%rdi), %ymm3 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX2-FAST-ALL-NEXT: vmovaps %ymm2, 64(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm1, 96(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm5, 128(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm9, 160(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm7, (%rcx) -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf16: -; AVX2-FAST-PERLANE: # %bb.0: -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm1 -; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm4 -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm5 -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm3 -; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm6 -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm2 = mem[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,1] -; 
AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm1[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6],ymm2[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rdx), %ymm7 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm6[2,1,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm8 = ymm5[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 56(%rdi), %ymm8 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm8 = mem[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm4[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 32(%rdx), %ymm9 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm3[2,1,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm10 = ymm0[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6],ymm9[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rdi), %ymm10 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6],ymm4[7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,3,3,4,4,7,7] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 128(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 64(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 96(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 160(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rcx) -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-LABEL: store_i32_stride3_vf16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rsi), %ymm0 +; AVX2-NEXT: vmovaps 32(%rsi), %ymm1 +; AVX2-NEXT: vmovaps (%rdx), %ymm2 +; AVX2-NEXT: vmovaps 32(%rdx), %ymm3 +; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm3[1,1,2,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7] +; AVX2-NEXT: vpermpd 
{{.*#+}} ymm2 = ymm2[1,1,2,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7] +; AVX2-NEXT: vbroadcastsd (%rdx), %ymm6 +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7] +; AVX2-NEXT: vbroadcastsd 32(%rdx), %ymm6 +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7] +; AVX2-NEXT: vmovaps %ymm1, 96(%rcx) +; AVX2-NEXT: vmovaps %ymm0, (%rcx) +; AVX2-NEXT: vmovaps %ymm2, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm5, 64(%rcx) +; AVX2-NEXT: vmovaps %ymm3, 160(%rcx) +; AVX2-NEXT: vmovaps %ymm4, 128(%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq ; ; AVX512-LABEL: store_i32_stride3_vf16: ; AVX512: # %bb.0: -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm1 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,16,u,1,17,u,2,18,u,3,19,u,4,20,u,5> +; AVX512-NEXT: vmovdqu64 (%rsi), %zmm0 +; AVX512-NEXT: vmovdqu64 (%rdx), %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15] ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15] -; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm4 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <5,21,u,6,22,u,7,23,u,8,24,u,9,25,u,10> -; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,22,3,4,23,6,7,24,9,10,25,12,13,26,15] -; AVX512-NEXT: vpermi2d %zmm0, %zmm3, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,27,u,11,28,u,12,29,u,13,30,u,14,31,u,15> -; AVX512-NEXT: vpermi2d %zmm0, %zmm2, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,27,3,4,28,6,7,29,9,10,30,12,13,31,15] -; AVX512-NEXT: vpermi2d %zmm1, %zmm3, %zmm0 -; AVX512-NEXT: vmovdqu64 %zmm0, 128(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm4, (%rcx) +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rcx) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <16 x i32>, <16 x i32>* %in.vecptr0, align 32 @@ -720,641 +436,262 @@ ; SSE-LABEL: store_i32_stride3_vf32: ; SSE: # %bb.0: ; SSE-NEXT: subq $56, %rsp -; SSE-NEXT: movaps 64(%rdi), %xmm12 -; SSE-NEXT: movaps (%rdi), %xmm2 -; SSE-NEXT: movaps 16(%rdi), %xmm10 -; SSE-NEXT: movaps 32(%rdi), %xmm9 -; SSE-NEXT: movaps 48(%rdi), %xmm8 -; SSE-NEXT: movaps (%rsi), %xmm7 -; SSE-NEXT: movaps 16(%rsi), %xmm3 -; SSE-NEXT: movaps 32(%rsi), %xmm5 -; SSE-NEXT: movaps 48(%rsi), %xmm6 -; SSE-NEXT: movaps (%rdx), %xmm1 -; SSE-NEXT: movaps 16(%rdx), %xmm11 -; SSE-NEXT: movaps 32(%rdx), %xmm13 -; SSE-NEXT: movaps 48(%rdx), %xmm14 -; SSE-NEXT: movaps %xmm1, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[1,3] -; SSE-NEXT: movaps %xmm2, %xmm4 -; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0,2] -; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm2, %xmm0 -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1] -; SSE-NEXT: movaps %xmm1, %xmm4 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm7[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm0[0,2] -; SSE-NEXT: movaps %xmm4, 
{{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm1[3,3] -; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm7[0,2] -; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill -; SSE-NEXT: movaps %xmm11, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm10[1,3] -; SSE-NEXT: movaps %xmm10, %xmm2 -; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm10, %xmm0 -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1] -; SSE-NEXT: movaps %xmm11, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm11[3,3] -; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm10[2],xmm11[3],xmm10[3] -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,3],xmm3[0,2] -; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm13, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm9[1,3] -; SSE-NEXT: movaps %xmm9, %xmm2 -; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm9, %xmm0 -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm5[1] -; SSE-NEXT: movaps %xmm13, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm13[3,3] -; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm9[2],xmm13[3],xmm9[3] -; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,3],xmm5[0,2] -; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm14, %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm8[1,3] +; SSE-NEXT: movaps 16(%rsi), %xmm10 +; SSE-NEXT: movaps 32(%rsi), %xmm1 +; SSE-NEXT: movaps 64(%rsi), %xmm8 +; SSE-NEXT: movaps 80(%rsi), %xmm12 +; SSE-NEXT: movaps 112(%rsi), %xmm0 +; SSE-NEXT: movaps 80(%rdx), %xmm15 +; SSE-NEXT: movaps 96(%rdx), %xmm4 +; SSE-NEXT: movaps 112(%rdx), %xmm5 +; SSE-NEXT: movaps %xmm10, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,2],xmm15[2,3] +; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,0],xmm0[3,0] +; SSE-NEXT: movaps %xmm0, %xmm13 +; SSE-NEXT: movaps %xmm0, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,2],xmm5[2,3] +; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm12[3,0] +; SSE-NEXT: movaps %xmm12, %xmm6 +; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,2] +; SSE-NEXT: movaps %xmm6, (%rsp) # 16-byte Spill ; SSE-NEXT: movaps %xmm8, %xmm2 -; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[0,2] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,2],xmm4[2,3] ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm8, %xmm0 -; SSE-NEXT: 
unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1] -; SSE-NEXT: movaps %xmm14, %xmm15 -; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,0],xmm6[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[2,0],xmm0[0,2] -; SSE-NEXT: movaps 64(%rsi), %xmm0 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm14[3,3] -; SSE-NEXT: unpckhps {{.*#+}} xmm14 = xmm14[2],xmm8[2],xmm14[3],xmm8[3] -; SSE-NEXT: movaps 64(%rdx), %xmm10 -; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm6[0,2] -; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm10, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm12[1,3] -; SSE-NEXT: movaps %xmm12, %xmm14 -; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1] -; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm1[0,2] -; SSE-NEXT: movaps %xmm12, %xmm1 -; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] -; SSE-NEXT: movaps %xmm10, %xmm13 -; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[1,0],xmm0[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,0],xmm1[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm10[3,3] -; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3] -; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,3],xmm0[0,2] -; SSE-NEXT: movaps 80(%rdi), %xmm3 -; SSE-NEXT: movaps 80(%rdx), %xmm12 -; SSE-NEXT: movaps %xmm12, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[1,3] -; SSE-NEXT: movaps 80(%rsi), %xmm0 -; SSE-NEXT: movaps %xmm3, %xmm11 -; SSE-NEXT: unpcklps {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1] -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm1[0,2] -; SSE-NEXT: movaps %xmm3, %xmm1 -; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] -; SSE-NEXT: movaps %xmm12, %xmm9 -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm0[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm1[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm12[3,3] -; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm3[2],xmm12[3],xmm3[3] -; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,3],xmm0[0,2] -; SSE-NEXT: movaps 96(%rdi), %xmm3 -; SSE-NEXT: movaps 96(%rdx), %xmm6 -; SSE-NEXT: movaps %xmm6, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[1,3] -; SSE-NEXT: movaps 96(%rsi), %xmm0 -; SSE-NEXT: movaps %xmm3, %xmm8 -; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1] -; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm2[0,2] -; SSE-NEXT: movaps %xmm3, %xmm2 -; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] +; SSE-NEXT: movaps %xmm4, %xmm14 +; SSE-NEXT: movaps %xmm1, %xmm5 +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm1[3,0] +; SSE-NEXT: movaps %xmm1, %xmm2 +; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0,2] +; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 32(%rdx), %xmm9 +; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,0],xmm9[1,0] +; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,2],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm15[0,2] +; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 96(%rsi), %xmm15 +; SSE-NEXT: movaps 64(%rdx), %xmm0 +; SSE-NEXT: movaps %xmm15, %xmm11 +; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,2],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,0],xmm0[1,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm8[3,0] +; SSE-NEXT: movaps 16(%rdx), %xmm6 ; SSE-NEXT: movaps %xmm6, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm0[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm2[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm0 
= xmm0[3,3],xmm6[3,3] -; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm0[0,2] -; SSE-NEXT: movaps 112(%rdi), %xmm5 -; SSE-NEXT: movaps 112(%rdx), %xmm2 -; SSE-NEXT: movaps %xmm2, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm5[1,3] -; SSE-NEXT: movaps 112(%rsi), %xmm0 -; SSE-NEXT: movaps %xmm5, %xmm4 -; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2] -; SSE-NEXT: movaps %xmm5, %xmm3 -; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1] -; SSE-NEXT: movaps %xmm2, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[1,0] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm2[3,3] -; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2] -; SSE-NEXT: movaps %xmm2, 368(%rcx) -; SSE-NEXT: movaps %xmm1, 352(%rcx) -; SSE-NEXT: movaps %xmm4, 336(%rcx) -; SSE-NEXT: movaps %xmm6, 320(%rcx) -; SSE-NEXT: movaps %xmm7, 304(%rcx) -; SSE-NEXT: movaps %xmm8, 288(%rcx) -; SSE-NEXT: movaps %xmm12, 272(%rcx) -; SSE-NEXT: movaps %xmm9, 256(%rcx) -; SSE-NEXT: movaps %xmm11, 240(%rcx) -; SSE-NEXT: movaps %xmm10, 224(%rcx) -; SSE-NEXT: movaps %xmm13, 208(%rcx) -; SSE-NEXT: movaps %xmm14, 192(%rcx) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 176(%rcx) -; SSE-NEXT: movaps %xmm15, 160(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm8[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm8[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[0,2] +; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 48(%rsi), %xmm1 +; SSE-NEXT: movaps 48(%rdx), %xmm0 +; SSE-NEXT: movaps %xmm1, %xmm8 +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,2],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm0[1,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm10[3,0] +; SSE-NEXT: movaps (%rdx), %xmm4 +; SSE-NEXT: movaps %xmm4, %xmm2 +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm10[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm10[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm0[0,2] ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 144(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm15[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[2,3] +; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps (%rsi), %xmm0 +; SSE-NEXT: movaps %xmm0, %xmm3 +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,2],xmm9[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,0],xmm15[3,0] +; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm9[0,2] +; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,0],xmm1[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm1[2,3] +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload +; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,2],xmm9[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,2],xmm6[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm1[3,0] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0,2] +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm0[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm0[2,3] +; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload +; SSE-NEXT: shufps {{.*#+}} xmm5 = 
xmm5[0,2],xmm6[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,2],xmm4[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm0[3,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[0,2] +; SSE-NEXT: movaps %xmm0, (%rcx) +; SSE-NEXT: movaps %xmm2, 16(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0,1,3] +; SSE-NEXT: movaps %xmm6, 32(%rcx) +; SSE-NEXT: movaps %xmm1, 48(%rcx) +; SSE-NEXT: movaps %xmm7, 64(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0,1,3] +; SSE-NEXT: movaps %xmm9, 80(%rcx) +; SSE-NEXT: movaps %xmm15, 96(%rcx) +; SSE-NEXT: movaps %xmm13, 112(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0,1,3] +; SSE-NEXT: movaps %xmm3, 128(%rcx) +; SSE-NEXT: movaps %xmm10, 144(%rcx) +; SSE-NEXT: movaps %xmm5, 160(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0,1,3] +; SSE-NEXT: movaps %xmm8, 176(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 128(%rcx) +; SSE-NEXT: movaps %xmm0, 192(%rcx) +; SSE-NEXT: movaps %xmm12, 208(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0,1,3] +; SSE-NEXT: movaps %xmm11, 224(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 112(%rcx) +; SSE-NEXT: movaps %xmm0, 240(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 96(%rcx) +; SSE-NEXT: movaps %xmm0, 256(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 80(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3] +; SSE-NEXT: movaps %xmm0, 272(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 64(%rcx) +; SSE-NEXT: movaps %xmm0, 288(%rcx) +; SSE-NEXT: movaps %xmm14, 304(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 48(%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3] +; SSE-NEXT: movaps %xmm0, 320(%rcx) ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 32(%rcx) +; SSE-NEXT: movaps %xmm0, 336(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 16(%rcx) +; SSE-NEXT: movaps %xmm0, 352(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, (%rcx) +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3] +; SSE-NEXT: movaps %xmm0, 368(%rcx) ; SSE-NEXT: addq $56, %rsp ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i32_stride3_vf32: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps (%rsi), %xmm0 -; AVX1-NEXT: vmovaps (%rdi), %xmm1 -; AVX1-NEXT: vmovaps 32(%rdi), %xmm5 -; AVX1-NEXT: vmovaps 64(%rdi), %xmm4 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1] -; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vbroadcastsd (%rdx), %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX1-NEXT: vmovaps 80(%rdx), %xmm1 -; AVX1-NEXT: vmovaps 80(%rsi), %xmm2 -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm2[3,0],xmm1[3,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm1[2,1],xmm3[0,2] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[1,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vbroadcastsd 88(%rdi), %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7] -; AVX1-NEXT: vmovaps 
48(%rdx), %xmm2 -; AVX1-NEXT: vmovaps 48(%rsi), %xmm3 -; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm3[3,0],xmm2[3,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm2[2,1],xmm6[0,2] -; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[1,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2 -; AVX1-NEXT: vbroadcastsd 56(%rdi), %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX1-NEXT: vmovaps 112(%rdx), %xmm3 -; AVX1-NEXT: vmovaps 112(%rsi), %xmm6 -; AVX1-NEXT: vshufps {{.*#+}} xmm7 = xmm6[3,0],xmm3[3,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm7 = xmm3[2,1],xmm7[0,2] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,0],xmm6[1,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm6[2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm3, %ymm3 -; AVX1-NEXT: vbroadcastsd 120(%rdi), %ymm6 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7] -; AVX1-NEXT: vmovaps 64(%rsi), %xmm6 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm4[1],xmm6[1] -; AVX1-NEXT: vshufps {{.*#+}} xmm7 = xmm6[1,1],xmm7[0,2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm4[0] -; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm6[2,0],xmm4[2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm4, %ymm4 -; AVX1-NEXT: vbroadcastsd 64(%rdx), %ymm6 -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7] -; AVX1-NEXT: vmovaps 32(%rsi), %xmm6 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm5[1],xmm6[1] -; AVX1-NEXT: vshufps {{.*#+}} xmm7 = xmm6[1,1],xmm7[0,2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm5[0] -; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm6[2,0],xmm5[2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5 -; AVX1-NEXT: vbroadcastsd 32(%rdx), %ymm6 -; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7] -; AVX1-NEXT: vmovaps 96(%rsi), %xmm6 -; AVX1-NEXT: vmovaps 96(%rdi), %xmm7 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm7[1],xmm6[1] -; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm6[1,1],xmm0[0,2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] -; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,0],xmm7[2,1] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm6, %ymm0 -; AVX1-NEXT: vbroadcastsd 96(%rdx), %ymm6 -; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7] -; AVX1-NEXT: vmovaps 16(%rdx), %xmm0 -; AVX1-NEXT: vmovaps 16(%rsi), %xmm7 -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm7[3,0],xmm0[3,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,1],xmm1[0,2] -; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,0],xmm7[1,0] -; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vbroadcastsd 24(%rdi), %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,2] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm7 = mem[1,1,2,2] -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5,6],ymm1[7] -; AVX1-NEXT: vpermilps {{.*#+}} ymm7 = mem[0,0,3,3,4,4,7,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm7[2],ymm1[3,4],ymm7[5],ymm1[6,7] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm7 = mem[1,0,2,2] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = mem[1,1,2,2] -; AVX1-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6],ymm7[7] -; AVX1-NEXT: vpermilps {{.*#+}} ymm10 = mem[0,0,3,3,4,4,7,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = mem[1,0,2,2] 
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm11 = mem[1,1,2,2] -; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5,6],ymm10[7] -; AVX1-NEXT: vpermilps {{.*#+}} ymm11 = mem[0,0,3,3,4,4,7,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm11 = mem[1,0,2,2] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm12 = mem[1,1,2,2] -; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1],ymm12[2,3],ymm11[4],ymm12[5,6],ymm11[7] -; AVX1-NEXT: vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4],ymm12[5],ymm11[6,7] -; AVX1-NEXT: vmovaps %ymm11, 32(%rcx) -; AVX1-NEXT: vmovaps %ymm10, 320(%rcx) -; AVX1-NEXT: vmovaps %ymm7, 128(%rcx) -; AVX1-NEXT: vmovaps %ymm1, 224(%rcx) -; AVX1-NEXT: vmovaps %ymm0, 64(%rcx) -; AVX1-NEXT: vmovaps %ymm6, 288(%rcx) +; AVX1-NEXT: vmovaps (%rsi), %ymm2 +; AVX1-NEXT: vmovaps 32(%rsi), %ymm3 +; AVX1-NEXT: vmovaps 64(%rsi), %ymm4 +; AVX1-NEXT: vmovaps 96(%rsi), %ymm5 +; AVX1-NEXT: vmovaps (%rdx), %ymm6 +; AVX1-NEXT: vmovaps 32(%rdx), %ymm7 +; AVX1-NEXT: vmovapd 64(%rdx), %ymm8 +; AVX1-NEXT: vmovaps 96(%rdx), %ymm9 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm8[2,3,2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm9[2,3,2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2,3],ymm1[4],ymm5[5,6],ymm1[7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm7[2,3,2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = ymm10[0,0,3,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm3[0],ymm10[1],ymm3[2,3],ymm10[4],ymm3[5,6],ymm10[7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm6[2,3,2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm11 = ymm11[0,0,3,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm4[0],ymm11[1],ymm4[2,3],ymm11[4],ymm4[5,6],ymm11[7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = ymm8[1,1,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm5[1,2],ymm8[3],ymm5[4,5],ymm8[6],ymm5[7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm9 = ymm9[1,1,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm4[1,2],ymm9[3],ymm4[4,5],ymm9[6],ymm4[7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm7 = ymm7[1,1,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm2[1,2],ymm7[3],ymm2[4,5],ymm7[6],ymm2[7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm6 = ymm6[1,1,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm3[1,2],ymm6[3],ymm3[4,5],ymm6[6],ymm3[7] +; AVX1-NEXT: vbroadcastsd (%rdx), %ymm12 +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4],ymm12[5],ymm2[6,7] +; AVX1-NEXT: vbroadcastsd 64(%rdx), %ymm12 +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm12[2],ymm4[3,4],ymm12[5],ymm4[6,7] +; AVX1-NEXT: vbroadcastsd 96(%rdx), %ymm12 +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm12[2],ymm3[3,4],ymm12[5],ymm3[6,7] +; AVX1-NEXT: vbroadcastsd 32(%rdx), %ymm12 +; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm12[2],ymm5[3,4],ymm12[5],ymm5[6,7] ; AVX1-NEXT: vmovaps %ymm5, 96(%rcx) +; AVX1-NEXT: vmovaps %ymm3, 288(%rcx) ; AVX1-NEXT: vmovaps %ymm4, 192(%rcx) -; AVX1-NEXT: vmovaps %ymm3, 352(%rcx) -; AVX1-NEXT: vmovaps %ymm2, 160(%rcx) -; AVX1-NEXT: vmovaps %ymm9, 256(%rcx) -; AVX1-NEXT: vmovaps %ymm8, (%rcx) +; AVX1-NEXT: vmovaps %ymm2, (%rcx) +; AVX1-NEXT: vmovaps %ymm6, 32(%rcx) +; AVX1-NEXT: vmovaps %ymm7, 128(%rcx) +; AVX1-NEXT: vmovaps %ymm9, 320(%rcx) +; AVX1-NEXT: vmovaps 
%ymm8, 224(%rcx) +; AVX1-NEXT: vmovaps %ymm11, 64(%rcx) +; AVX1-NEXT: vmovaps %ymm10, 160(%rcx) +; AVX1-NEXT: vmovaps %ymm1, 352(%rcx) +; AVX1-NEXT: vmovaps %ymm0, 256(%rcx) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; -; AVX2-SLOW-LABEL: store_i32_stride3_vf32: -; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: subq $40, %rsp -; AVX2-SLOW-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovaps 32(%rdi), %ymm4 -; AVX2-SLOW-NEXT: vmovaps 64(%rdi), %ymm8 -; AVX2-SLOW-NEXT: vmovaps 32(%rsi), %ymm5 -; AVX2-SLOW-NEXT: vmovaps 64(%rsi), %ymm9 -; AVX2-SLOW-NEXT: vmovaps 96(%rsi), %ymm2 -; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm10 -; AVX2-SLOW-NEXT: vmovaps 64(%rdx), %ymm12 -; AVX2-SLOW-NEXT: vmovaps 96(%rdx), %ymm6 -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = mem[1,0,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[0,0,2,1] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7] -; AVX2-SLOW-NEXT: vbroadcastsd (%rdx), %ymm3 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7] -; AVX2-SLOW-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm12[2,1,3,3] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm7 = ymm9[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,2,2,3] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2,3],ymm3[4],ymm7[5,6],ymm3[7] -; AVX2-SLOW-NEXT: vbroadcastsd 88(%rdi), %ymm7 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7] -; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm10[2,1,3,3] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm11 = ymm5[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,3] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6],ymm7[7] -; AVX2-SLOW-NEXT: vbroadcastsd 56(%rdi), %ymm11 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm11[2],ymm7[3,4],ymm11[5],ymm7[6,7] -; AVX2-SLOW-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm6[2,1,3,3] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm13 = ymm2[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,2,2,3] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6],ymm11[7] -; AVX2-SLOW-NEXT: vbroadcastsd 120(%rdi), %ymm13 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2],ymm11[3,4],ymm13[5],ymm11[6,7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm13 = mem[1,0,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,0,1] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm8[0,0,2,1] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7] -; AVX2-SLOW-NEXT: vbroadcastsd 64(%rdx), %ymm14 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm14 = mem[1,0,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,0,1] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm4[0,0,2,1] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2,3],ymm14[4],ymm15[5,6],ymm14[7] -; AVX2-SLOW-NEXT: vbroadcastsd 32(%rdx), %ymm15 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm14 = 
ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm15 = mem[1,0,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,0,1] -; AVX2-SLOW-NEXT: vmovaps 96(%rdi), %ymm2 -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[0,0,2,1] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3],ymm15[4],ymm1[5,6],ymm15[7] -; AVX2-SLOW-NEXT: vbroadcastsd 96(%rdx), %ymm15 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3,4],ymm15[5],ymm1[6,7] -; AVX2-SLOW-NEXT: vmovaps (%rsi), %ymm15 -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm3 = ymm15[1,2,3,3,5,6,7,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX2-SLOW-NEXT: vmovaps (%rdx), %ymm0 -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm0[2,1,3,3] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2,3],ymm7[4],ymm3[5,6],ymm7[7] -; AVX2-SLOW-NEXT: vbroadcastsd 24(%rdi), %ymm7 -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm8[1,1,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm12[1,1,2,2] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm8 = ymm9[0,0,3,3,4,4,7,7] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm8 = ymm10[1,1,2,2] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5,6],ymm4[7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm6[1,1,2,2] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7] -; AVX2-SLOW-NEXT: vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: # ymm5 = mem[0,0,3,3,4,4,7,7] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7] -; AVX2-SLOW-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: # ymm5 = mem[1,1,2,2] -; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7] -; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = ymm15[0,0,3,3,4,4,7,7] -; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7] -; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm2, 320(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm4, 128(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm7, 224(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm3, 64(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm1, 288(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm14, 96(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm13, 192(%rcx) -; AVX2-SLOW-NEXT: vmovaps %ymm11, 352(%rcx) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 160(%rcx) -; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rcx) -; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX2-SLOW-NEXT: vmovaps %ymm0, (%rcx) -; AVX2-SLOW-NEXT: addq $40, %rsp -; AVX2-SLOW-NEXT: vzeroupper -; AVX2-SLOW-NEXT: retq -; -; AVX2-FAST-ALL-LABEL: store_i32_stride3_vf32: -; AVX2-FAST-ALL: # %bb.0: -; AVX2-FAST-ALL-NEXT: 
vmovaps (%rdi), %ymm0 -; AVX2-FAST-ALL-NEXT: vmovaps 32(%rdi), %ymm5 -; AVX2-FAST-ALL-NEXT: vmovaps 64(%rdi), %ymm9 -; AVX2-FAST-ALL-NEXT: vmovaps (%rsi), %ymm1 -; AVX2-FAST-ALL-NEXT: vmovaps 32(%rsi), %ymm7 -; AVX2-FAST-ALL-NEXT: vmovaps 64(%rsi), %ymm11 -; AVX2-FAST-ALL-NEXT: vmovaps 96(%rsi), %ymm6 -; AVX2-FAST-ALL-NEXT: vmovaps 32(%rdx), %ymm12 -; AVX2-FAST-ALL-NEXT: vmovaps 64(%rdx), %ymm4 -; AVX2-FAST-ALL-NEXT: vmovaps 96(%rdx), %ymm15 -; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm10 = [1,0,2,2,1,0,2,2] -; AVX2-FAST-ALL-NEXT: # ymm10 = mem[0,1,0,1] -; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm10, %ymm2 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd (%rdx), %ymm3 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX2-FAST-ALL-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm9[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm8 = ymm4[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0],ymm3[1],ymm8[2,3],ymm3[4],ymm8[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm8 = ymm11[0,0,3,3,4,4,7,7] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm8[2],ymm3[3,4],ymm8[5],ymm3[6,7] -; AVX2-FAST-ALL-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm13 = [5,6,5,6,5,6,7,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm11, %ymm13, %ymm8 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5,6],ymm4[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 88(%rdi), %ymm8 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm8[2],ymm4[3,4],ymm8[5],ymm4[6,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm7, %ymm13, %ymm8 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm14 = ymm12[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm14[1],ymm8[2,3],ymm14[4],ymm8[5,6],ymm14[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 56(%rdi), %ymm14 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm14[2],ymm8[3,4],ymm14[5],ymm8[6,7] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm14 = ymm5[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0],ymm14[1],ymm12[2,3],ymm14[4],ymm12[5,6],ymm14[7] -; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm14 = ymm7[0,0,3,3,4,4,7,7] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm6, %ymm13, %ymm14 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm15[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm14[0],ymm2[1],ymm14[2,3],ymm2[4],ymm14[5,6],ymm2[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 120(%rdi), %ymm14 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm14 = ymm2[0,1],ymm14[2],ymm2[3,4],ymm14[5],ymm2[6,7] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm15[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vmovaps 96(%rdi), %ymm15 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm15[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm3 = ymm6[0,0,3,3,4,4,7,7] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm11, %ymm10, %ymm3 -; AVX2-FAST-ALL-NEXT: vpermpd 
{{.*#+}} ymm9 = ymm9[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm9[0],ymm3[1],ymm9[2,3],ymm3[4],ymm9[5,6],ymm3[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 64(%rdx), %ymm9 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm9[2],ymm3[3,4],ymm9[5],ymm3[6,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm7, %ymm10, %ymm7 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm7[1],ymm5[2,3],ymm7[4],ymm5[5,6],ymm7[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 32(%rdx), %ymm7 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3,4],ymm7[5],ymm5[6,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm6, %ymm10, %ymm6 -; AVX2-FAST-ALL-NEXT: vmovaps (%rdx), %ymm7 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm9 = ymm15[0,0,2,1] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0],ymm6[1],ymm9[2,3],ymm6[4],ymm9[5,6],ymm6[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 96(%rdx), %ymm9 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm9[2],ymm6[3,4],ymm9[5],ymm6[6,7] -; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm13, %ymm9 -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm10 = ymm7[2,1,3,3] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7] -; AVX2-FAST-ALL-NEXT: vbroadcastsd 24(%rdi), %ymm10 -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[1,1,2,2] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5,6],ymm0[7] -; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7] -; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7] -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm9, 64(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm6, 288(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm5, 96(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm3, 192(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm2, 320(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm14, 352(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm12, 128(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm8, 160(%rcx) -; AVX2-FAST-ALL-NEXT: vmovaps %ymm4, 256(%rcx) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 224(%rcx) -; AVX2-FAST-ALL-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, (%rcx) -; AVX2-FAST-ALL-NEXT: vzeroupper -; AVX2-FAST-ALL-NEXT: retq -; -; AVX2-FAST-PERLANE-LABEL: store_i32_stride3_vf32: -; AVX2-FAST-PERLANE: # %bb.0: -; AVX2-FAST-PERLANE-NEXT: subq $40, %rsp -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdi), %ymm4 -; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdi), %ymm8 -; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rsi), %ymm5 -; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rsi), %ymm9 -; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rsi), %ymm2 -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm10 -; AVX2-FAST-PERLANE-NEXT: vmovaps 64(%rdx), %ymm12 -; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdx), %ymm6 -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm1 = mem[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[0,0,2,1] -; 
AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rdx), %ymm3 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm12[2,1,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm7 = ymm9[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2,3],ymm3[4],ymm7[5,6],ymm3[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 88(%rdi), %ymm7 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm10[2,1,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm11 = ymm5[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6],ymm7[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 56(%rdi), %ymm11 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm11[2],ymm7[3,4],ymm11[5],ymm7[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm6[2,1,3,3] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm13 = ymm2[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,2,2,3] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6],ymm11[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 120(%rdi), %ymm13 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2],ymm11[3,4],ymm13[5],ymm11[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm13 = mem[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm8[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1],ymm14[2,3],ymm13[4],ymm14[5,6],ymm13[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 64(%rdx), %ymm14 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm14 = mem[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm4[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2,3],ymm14[4],ymm15[5,6],ymm14[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 32(%rdx), %ymm15 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm15 = mem[1,0,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,0,1] -; AVX2-FAST-PERLANE-NEXT: vmovaps 96(%rdi), %ymm2 -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[0,0,2,1] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3],ymm15[4],ymm1[5,6],ymm15[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 96(%rdx), %ymm15 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3,4],ymm15[5],ymm1[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovaps (%rsi), %ymm15 -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm3 = ymm15[1,2,3,3,5,6,7,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,2,2,3] -; 
AVX2-FAST-PERLANE-NEXT: vmovaps (%rdx), %ymm0 -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm0[2,1,3,3] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2,3],ymm7[4],ymm3[5,6],ymm7[7] -; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rdi), %ymm7 -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4],ymm7[5],ymm3[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm8[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm12[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm8 = ymm9[0,0,3,3,4,4,7,7] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm8 = ymm10[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5,6],ymm4[7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm5 = ymm5[0,0,3,3,4,4,7,7] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm6[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6],ymm2[7] -; AVX2-FAST-PERLANE-NEXT: vpermilps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload -; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,0,3,3,4,4,7,7] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7] -; AVX2-FAST-PERLANE-NEXT: vpermpd $165, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload -; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5,6],ymm5[7] -; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm5 = ymm15[0,0,3,3,4,4,7,7] -; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7] -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 320(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 128(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 224(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 64(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 288(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm14, 96(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm13, 192(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 352(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 160(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rcx) -; AVX2-FAST-PERLANE-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rcx) -; AVX2-FAST-PERLANE-NEXT: addq $40, %rsp -; AVX2-FAST-PERLANE-NEXT: vzeroupper -; AVX2-FAST-PERLANE-NEXT: retq +; AVX2-LABEL: store_i32_stride3_vf32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rsi), %ymm1 +; AVX2-NEXT: vmovaps 32(%rsi), %ymm2 +; AVX2-NEXT: vmovaps 64(%rsi), %ymm3 +; AVX2-NEXT: vmovaps 96(%rsi), %ymm4 +; AVX2-NEXT: vmovaps (%rdx), %ymm5 +; AVX2-NEXT: vmovaps 32(%rdx), %ymm6 +; AVX2-NEXT: vmovaps 64(%rdx), %ymm7 +; AVX2-NEXT: vmovaps 96(%rdx), %ymm8 +; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm7[2,1,3,3] +; AVX2-NEXT: 
vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[1,1,2,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0],ymm4[1,2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm8[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm4[0],ymm9[1],ymm4[2,3],ymm9[4],ymm4[5,6],ymm9[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[1,1,2,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0],ymm3[1,2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm6[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0],ymm10[1],ymm2[2,3],ymm10[4],ymm2[5,6],ymm10[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[1,1,2,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0],ymm1[1,2],ymm6[3],ymm1[4,5],ymm6[6],ymm1[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm5[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm3[0],ymm11[1],ymm3[2,3],ymm11[4],ymm3[5,6],ymm11[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[1,1,2,2] +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0],ymm2[1,2],ymm5[3],ymm2[4,5],ymm5[6],ymm2[7] +; AVX2-NEXT: vbroadcastsd (%rdx), %ymm12 +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm12[2],ymm1[3,4],ymm12[5],ymm1[6,7] +; AVX2-NEXT: vbroadcastsd 64(%rdx), %ymm12 +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm12[2],ymm3[3,4],ymm12[5],ymm3[6,7] +; AVX2-NEXT: vbroadcastsd 96(%rdx), %ymm12 +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4],ymm12[5],ymm2[6,7] +; AVX2-NEXT: vbroadcastsd 32(%rdx), %ymm12 +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm12[2],ymm4[3,4],ymm12[5],ymm4[6,7] +; AVX2-NEXT: vmovaps %ymm4, 96(%rcx) +; AVX2-NEXT: vmovaps %ymm2, 288(%rcx) +; AVX2-NEXT: vmovaps %ymm3, 192(%rcx) +; AVX2-NEXT: vmovaps %ymm1, (%rcx) +; AVX2-NEXT: vmovaps %ymm5, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm11, 64(%rcx) +; AVX2-NEXT: vmovaps %ymm6, 128(%rcx) +; AVX2-NEXT: vmovaps %ymm10, 160(%rcx) +; AVX2-NEXT: vmovaps %ymm8, 320(%rcx) +; AVX2-NEXT: vmovaps %ymm9, 352(%rcx) +; AVX2-NEXT: vmovaps %ymm7, 224(%rcx) +; AVX2-NEXT: vmovaps %ymm0, 256(%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq ; ; AVX512-LABEL: store_i32_stride3_vf32: ; AVX512: # %bb.0: -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 -; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm1 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm2 -; AVX512-NEXT: vmovdqu64 64(%rsi), %zmm3 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm4 -; AVX512-NEXT: vmovdqu64 64(%rdx), %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,16,u,1,17,u,2,18,u,3,19,u,4,20,u,5> -; AVX512-NEXT: vmovdqa64 %zmm0, %zmm7 -; AVX512-NEXT: vpermt2d %zmm2, %zmm6, %zmm7 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15] -; AVX512-NEXT: vpermt2d %zmm4, %zmm8, %zmm7 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = <10,27,u,11,28,u,12,29,u,13,30,u,14,31,u,15> -; AVX512-NEXT: vmovdqa64 %zmm5, %zmm10 -; AVX512-NEXT: vpermt2d %zmm1, %zmm9, %zmm10 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,1,27,3,4,28,6,7,29,9,10,30,12,13,31,15] -; AVX512-NEXT: vpermt2d %zmm3, %zmm11, %zmm10 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <5,21,u,6,22,u,7,23,u,8,24,u,9,25,u,10> -; AVX512-NEXT: vmovdqa64 %zmm3, %zmm13 -; AVX512-NEXT: vpermt2d %zmm5, %zmm12, %zmm13 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,22,3,4,23,6,7,24,9,10,25,12,13,26,15] -; AVX512-NEXT: vpermt2d %zmm1, %zmm14, %zmm13 -; AVX512-NEXT: vpermt2d %zmm3, %zmm6, %zmm1 -; AVX512-NEXT: vpermt2d %zmm5, %zmm8, %zmm1 -; AVX512-NEXT: vpermi2d %zmm0, %zmm4, %zmm9 -; AVX512-NEXT: vpermt2d %zmm2, %zmm11, %zmm9 -; AVX512-NEXT: vpermt2d %zmm4, %zmm12, 
%zmm2 -; AVX512-NEXT: vpermt2d %zmm0, %zmm14, %zmm2 -; AVX512-NEXT: vmovdqu64 %zmm2, 64(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm9, 128(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm1, 192(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm13, 256(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm10, 320(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm7, (%rcx) +; AVX512-NEXT: vmovdqu64 (%rsi), %zmm0 +; AVX512-NEXT: vmovdqu64 64(%rsi), %zmm1 +; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2 +; AVX512-NEXT: vmovdqu64 64(%rdx), %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15] +; AVX512-NEXT: vmovdqa64 %zmm0, %zmm5 +; AVX512-NEXT: vpermt2d %zmm2, %zmm4, %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31] +; AVX512-NEXT: vmovdqa64 %zmm1, %zmm7 +; AVX512-NEXT: vpermt2d %zmm3, %zmm6, %zmm7 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15] +; AVX512-NEXT: vmovdqa64 %zmm0, %zmm9 +; AVX512-NEXT: vpermt2d %zmm3, %zmm8, %zmm9 +; AVX512-NEXT: vpermi2d %zmm3, %zmm1, %zmm4 +; AVX512-NEXT: vpermt2d %zmm2, %zmm6, %zmm0 +; AVX512-NEXT: vpermt2d %zmm2, %zmm8, %zmm1 +; AVX512-NEXT: vmovdqu64 %zmm1, 64(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm0, 128(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm4, 192(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm9, 256(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm7, 320(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm5, (%rcx) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <32 x i32>, <32 x i32>* %in.vecptr0, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll @@ -469,176 +469,176 @@ ; AVX1-LABEL: store_i32_stride4_vf16: ; AVX1: # %bb.0: ; AVX1-NEXT: subq $24, %rsp -; AVX1-NEXT: vmovaps 16(%rdi), %xmm2 -; AVX1-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovaps 32(%rdi), %xmm13 +; AVX1-NEXT: vmovaps 16(%rdi), %xmm7 +; AVX1-NEXT: vmovaps 32(%rdi), %xmm6 ; AVX1-NEXT: vmovaps 48(%rdi), %xmm11 -; AVX1-NEXT: vmovaps 16(%rsi), %xmm1 -; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovaps 32(%rsi), %xmm8 -; AVX1-NEXT: vmovaps 48(%rsi), %xmm9 -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero -; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm10 -; AVX1-NEXT: vmovaps 16(%rcx), %xmm0 -; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps 16(%rsi), %xmm13 +; AVX1-NEXT: vmovaps 32(%rsi), %xmm14 +; AVX1-NEXT: vmovaps 48(%rsi), %xmm10 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm11[1],xmm10[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm10[0],xmm11[1],xmm10[1] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm8 +; AVX1-NEXT: vmovaps 16(%rcx), %xmm12 ; AVX1-NEXT: vmovaps 32(%rcx), %xmm3 -; AVX1-NEXT: vmovaps 48(%rcx), %xmm6 +; AVX1-NEXT: vmovaps 48(%rcx), %xmm2 ; AVX1-NEXT: vmovaps 16(%rdx), %xmm15 -; AVX1-NEXT: vmovaps 32(%rdx), %xmm4 -; AVX1-NEXT: vmovaps 48(%rdx), %xmm5 -; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm15[0],xmm0[0],xmm15[1],xmm0[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm15[0] -; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3],ymm10[4,5],ymm0[6,7] +; AVX1-NEXT: vmovaps 32(%rdx), %xmm1 +; AVX1-NEXT: 
vmovaps 48(%rdx), %xmm4 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm9 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm2[0],xmm4[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm5, %ymm5 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7] ; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm13[1],xmm8[1],zero,zero -; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm13[0],xmm8[0],xmm13[1],xmm8[1] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm7 = xmm3[0],xmm4[0] -; AVX1-NEXT: vpermilps {{.*#+}} xmm7 = xmm7[0,1,2,0] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm7, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] +; AVX1-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[1],xmm14[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm8 = xmm6[0],xmm14[0],xmm6[1],xmm14[1] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm8, %ymm8 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm3[0],xmm1[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7] ; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm11[1],xmm9[1],zero,zero -; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm9[0],xmm11[1],xmm9[1] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm6[0],xmm5[0] -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,2,0] -; AVX1-NEXT: vunpcklps {{.*#+}} xmm7 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm1, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] +; AVX1-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm7[1],xmm13[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm13[0],xmm7[1],xmm13[1] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm12[0],xmm15[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0] +; AVX1-NEXT: vunpcklps {{.*#+}} xmm7 = xmm15[0],xmm12[0],xmm15[1],xmm12[1] +; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3],ymm5[4,5],ymm6[6,7] ; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovaps (%rdi), %xmm2 -; AVX1-NEXT: vmovaps (%rsi), %xmm1 -; AVX1-NEXT: vinsertps {{.*#+}} xmm7 = xmm2[1],xmm1[1],zero,zero -; AVX1-NEXT: vunpcklps {{.*#+}} xmm10 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm10, %ymm10 -; AVX1-NEXT: vmovaps (%rcx), %xmm7 +; AVX1-NEXT: vmovaps (%rdi), %xmm8 +; AVX1-NEXT: vmovaps (%rsi), %xmm7 +; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm8[1],xmm7[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1] +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm9, %ymm9 +; AVX1-NEXT: vmovaps (%rcx), %xmm6 ; AVX1-NEXT: vmovaps (%rdx), %xmm0 -; AVX1-NEXT: vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm0[0] -; AVX1-NEXT: vpermilps {{.*#+}} xmm12 = xmm12[0,1,2,0] -; AVX1-NEXT: vunpcklps {{.*#+}} xmm14 = 
xmm0[0],xmm7[0],xmm0[1],xmm7[1] -; AVX1-NEXT: vinsertf128 $1, %xmm14, %ymm12, %ymm12 -; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7] -; AVX1-NEXT: vunpckhps {{.*#+}} xmm12 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = zero,zero,xmm4[2],xmm3[2] -; AVX1-NEXT: vinsertf128 $1, %xmm12, %ymm3, %ymm12 -; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm13[2],xmm8[2],xmm13[3],xmm8[3] -; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm8[3,0],xmm13[3,0] -; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm12[2,3],ymm3[4,5],ymm12[6,7] -; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm6[2],xmm5[3],xmm6[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = zero,zero,xmm5[2],xmm6[2] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4 -; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm11[2],xmm9[2],xmm11[3],xmm9[3] -; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm9[3,0],xmm11[3,0] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm6[0],xmm0[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,1,2,0] +; AVX1-NEXT: vunpcklps {{.*#+}} xmm13 = xmm0[0],xmm6[0],xmm0[1],xmm6[1] +; AVX1-NEXT: vinsertf128 $1, %xmm13, %ymm5, %ymm5 +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm5[2,3],ymm9[4,5],ymm5[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm6[2],xmm0[3],xmm6[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm6[2] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm7[2],xmm8[3],xmm7[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm7[3,0],xmm8[3,0] ; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3] ; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5 -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7] -; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm7[2],xmm0[3],xmm7[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm7[2] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0 -; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,0],xmm2[3,0] -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm5, %ymm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7] -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload -; AVX1-NEXT: vunpckhps {{.*#+}} xmm1 = xmm15[2],xmm2[2],xmm15[3],xmm2[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = zero,zero,xmm15[2],xmm2[2] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload -; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,0],xmm6[3,0] -; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[2,0,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0,1],ymm0[2,3],ymm5[4,5],ymm0[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm4[2],xmm2[2],xmm4[3],xmm2[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = zero,zero,xmm4[2],xmm2[2] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm11[2],xmm10[2],xmm11[3],xmm10[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm10[3,0],xmm11[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7] +; AVX1-NEXT: 
vunpckhps {{.*#+}} xmm2 = xmm1[2],xmm3[2],xmm1[3],xmm3[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = zero,zero,xmm1[2],xmm3[2] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload +; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm14[2],xmm3[3],xmm14[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm14[3,0],xmm3[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7] -; AVX1-NEXT: vmovaps %ymm1, 96(%r8) -; AVX1-NEXT: vmovaps %ymm0, 32(%r8) -; AVX1-NEXT: vmovaps %ymm4, 224(%r8) -; AVX1-NEXT: vmovaps %ymm3, 160(%r8) -; AVX1-NEXT: vmovaps %ymm10, (%r8) +; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm15[2],xmm12[2],xmm15[3],xmm12[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = zero,zero,xmm15[2],xmm12[2] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload +; AVX1-NEXT: vunpckhps {{.*#+}} xmm3 = xmm5[2],xmm4[2],xmm5[3],xmm4[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,0],xmm5[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7] +; AVX1-NEXT: vmovaps %ymm2, 96(%r8) +; AVX1-NEXT: vmovaps %ymm1, 160(%r8) +; AVX1-NEXT: vmovaps %ymm0, 224(%r8) +; AVX1-NEXT: vmovaps %ymm8, 32(%r8) +; AVX1-NEXT: vmovaps %ymm9, (%r8) ; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 192(%r8) +; AVX1-NEXT: vmovaps %ymm0, 64(%r8) ; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX1-NEXT: vmovaps %ymm0, 128(%r8) ; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 64(%r8) +; AVX1-NEXT: vmovaps %ymm0, 192(%r8) ; AVX1-NEXT: addq $24, %rsp ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: store_i32_stride4_vf16: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovaps 32(%rdi), %ymm13 -; AVX2-NEXT: vmovaps (%rdi), %ymm11 -; AVX2-NEXT: vmovaps 32(%rsi), %ymm14 -; AVX2-NEXT: vmovaps (%rsi), %ymm12 -; AVX2-NEXT: vmovaps 32(%rdx), %ymm5 -; AVX2-NEXT: vmovaps (%rdx), %ymm15 -; AVX2-NEXT: vmovaps 32(%rcx), %ymm7 -; AVX2-NEXT: vmovaps (%rcx), %xmm6 -; AVX2-NEXT: vmovaps 32(%rcx), %xmm0 -; AVX2-NEXT: vmovaps (%rdx), %xmm1 -; AVX2-NEXT: vmovaps 32(%rdx), %xmm2 -; AVX2-NEXT: vunpckhps {{.*#+}} xmm8 = xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1] -; AVX2-NEXT: vmovaps 32(%rsi), %xmm3 +; AVX2-NEXT: vmovaps (%rdi), %ymm9 +; AVX2-NEXT: vmovaps (%rcx), %xmm10 +; AVX2-NEXT: vmovaps 32(%rcx), %xmm3 +; AVX2-NEXT: vmovaps (%rdx), %xmm5 +; AVX2-NEXT: vmovaps 32(%rdx), %xmm6 +; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm10[2],xmm5[3],xmm10[3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1] +; AVX2-NEXT: vmovaps (%rsi), %xmm7 +; AVX2-NEXT: vmovaps 32(%rsi), %xmm2 +; AVX2-NEXT: vmovaps (%rdi), %xmm0 ; AVX2-NEXT: vmovaps 32(%rdi), %xmm4 -; AVX2-NEXT: vunpckhps {{.*#+}} xmm9 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3] -; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7] -; AVX2-NEXT: vmovaps (%rsi), %xmm8 -; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; AVX2-NEXT: vmovaps (%rdi), %xmm2 -; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1] -; AVX2-NEXT: 
vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] -; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,1,3] -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5],ymm0[6,7] -; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm1[2],xmm6[2],xmm1[3],xmm6[3] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm8 = xmm0[2],xmm7[2],xmm0[3],xmm7[3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,1,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm1[2,3],ymm8[4,5],ymm1[6,7] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm8 = xmm6[0],xmm3[0],xmm6[1],xmm3[1] +; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm11 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] +; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,1,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm8[2,3],ymm11[4,5],ymm8[6,7] +; AVX2-NEXT: vmovaps 32(%rdi), %ymm11 +; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm6[2],xmm3[2],xmm6[3],xmm3[3] +; AVX2-NEXT: vmovaps (%rsi), %ymm6 +; AVX2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm4[2],xmm2[2],xmm4[3],xmm2[3] +; AVX2-NEXT: vmovaps 32(%rsi), %ymm4 ; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1] -; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm8[2],xmm2[3],xmm8[3] -; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,1,3] -; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7] -; AVX2-NEXT: vmovaps (%rcx), %ymm3 -; AVX2-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1] -; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1] -; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1] ; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,1,3] -; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm15[0],ymm3[0],ymm15[1],ymm3[1],ymm15[4],ymm3[4],ymm15[5],ymm3[5] +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7] +; AVX2-NEXT: vmovaps 32(%rdx), %ymm2 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1] +; AVX2-NEXT: vmovaps 32(%rcx), %ymm10 +; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1] +; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5],ymm5[6,7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm2[0],ymm10[0],ymm2[1],ymm10[1],ymm2[4],ymm10[4],ymm2[5],ymm10[5] +; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm7 = ymm11[0],ymm4[0],ymm11[1],ymm4[1],ymm11[4],ymm4[4],ymm11[5],ymm4[5] +; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7] +; AVX2-NEXT: vmovaps (%rdx), %ymm7 +; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm10[2],ymm2[3],ymm10[3],ymm2[6],ymm10[6],ymm2[7],ymm10[7] +; AVX2-NEXT: vmovaps (%rcx), %ymm10 ; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[4],ymm12[4],ymm11[5],ymm12[5] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm4 = ymm11[2],ymm4[2],ymm11[3],ymm4[3],ymm11[6],ymm4[6],ymm11[7],ymm4[7] ; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7] -; AVX2-NEXT: vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm7[0],ymm10[0],ymm7[1],ymm10[1],ymm7[4],ymm10[4],ymm7[5],ymm10[5] ; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3] -; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = 
ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7] -; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3] -; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5] -; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3] -; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3] -; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7] -; AVX2-NEXT: vunpckhps {{.*#+}} ymm3 = ymm15[2],ymm3[2],ymm15[3],ymm3[3],ymm15[6],ymm3[6],ymm15[7],ymm3[7] -; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[6],ymm12[6],ymm11[7],ymm12[7] -; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,2,3] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm11 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[4],ymm6[4],ymm9[5],ymm6[5] +; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1],ymm4[2,3],ymm11[4,5],ymm4[6,7] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm7 = ymm7[2],ymm10[2],ymm7[3],ymm10[3],ymm7[6],ymm10[6],ymm7[7],ymm10[7] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[6],ymm6[6],ymm9[7],ymm6[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,2,3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3] -; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3],ymm6[4,5],ymm3[6,7] -; AVX2-NEXT: vmovaps %ymm3, 96(%r8) +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5],ymm7[6,7] +; AVX2-NEXT: vmovaps %ymm6, 96(%r8) +; AVX2-NEXT: vmovaps %ymm4, 64(%r8) +; AVX2-NEXT: vmovaps %ymm2, 224(%r8) ; AVX2-NEXT: vmovaps %ymm5, 192(%r8) -; AVX2-NEXT: vmovaps %ymm4, 224(%r8) -; AVX2-NEXT: vmovaps %ymm2, 64(%r8) -; AVX2-NEXT: vmovaps %ymm1, (%r8) -; AVX2-NEXT: vmovaps %ymm9, 32(%r8) -; AVX2-NEXT: vmovaps %ymm0, 128(%r8) -; AVX2-NEXT: vmovaps %ymm10, 160(%r8) +; AVX2-NEXT: vmovaps %ymm0, (%r8) +; AVX2-NEXT: vmovaps %ymm3, 160(%r8) +; AVX2-NEXT: vmovaps %ymm8, 128(%r8) +; AVX2-NEXT: vmovaps %ymm1, 32(%r8) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1152,49 +1152,49 @@ ; AVX2-NEXT: vmovaps (%rdx), %xmm14 ; AVX2-NEXT: vmovaps 32(%rdx), %xmm12 ; AVX2-NEXT: vmovaps 64(%rdx), %xmm3 -; AVX2-NEXT: vunpckhps {{.*#+}} xmm8 = xmm3[2],xmm11[2],xmm3[3],xmm11[3] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm11[0],xmm3[1],xmm11[1] ; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1] ; AVX2-NEXT: vmovaps 32(%rsi), %xmm4 ; AVX2-NEXT: vmovaps 64(%rsi), %xmm7 ; AVX2-NEXT: vmovaps 32(%rdi), %xmm0 ; AVX2-NEXT: vmovaps 64(%rdi), %xmm5 -; AVX2-NEXT: vunpckhps {{.*#+}} xmm9 = xmm5[2],xmm7[2],xmm5[3],xmm7[3] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm9 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] ; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7] ; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1] -; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7] ; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill 
-; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm12[2],xmm10[2],xmm12[3],xmm10[3] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm12[0],xmm10[0],xmm12[1],xmm10[1] ; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1] -; AVX2-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] ; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7] ; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm12[0],xmm10[0],xmm12[1],xmm10[1] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm12[2],xmm10[2],xmm12[3],xmm10[3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1] -; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7] ; AVX2-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill ; AVX2-NEXT: vmovaps 96(%rcx), %xmm10 ; AVX2-NEXT: vmovaps 96(%rdx), %xmm3 -; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm3[2],xmm10[2],xmm3[3],xmm10[3] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm4 = xmm3[0],xmm10[0],xmm3[1],xmm10[1] ; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm4[0,0,2,1] ; AVX2-NEXT: vmovaps 96(%rsi), %xmm4 ; AVX2-NEXT: vmovaps 96(%rdi), %xmm0 -; AVX2-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm4[2],xmm0[3],xmm4[3] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm12 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] ; AVX2-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[0,1,1,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm8[2,3],ymm12[4,5],ymm8[6,7] ; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-NEXT: vmovaps (%rsi), %xmm1 -; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3] ; AVX2-NEXT: vmovaps (%rdi), %xmm10 ; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1] -; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7] ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -1219,43 +1219,43 @@ ; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5] ; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm4[0,1],ymm1[2,3],ymm4[4,5],ymm1[6,7] -; AVX2-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[6],ymm0[6],ymm6[7],ymm0[7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm1 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[4],ymm0[4],ymm6[5],ymm0[5] ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3] ; AVX2-NEXT: vmovaps 64(%rdi), %ymm10 ; AVX2-NEXT: vmovaps 64(%rsi), %ymm14 -; AVX2-NEXT: vunpckhps {{.*#+}} ymm9 = ymm10[2],ymm14[2],ymm10[3],ymm14[3],ymm10[6],ymm14[6],ymm10[7],ymm14[7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm9 = ymm10[0],ymm14[0],ymm10[1],ymm14[1],ymm10[4],ymm14[4],ymm10[5],ymm14[5] ; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm1[2,3],ymm9[4,5],ymm1[6,7] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[4],ymm0[4],ymm6[5],ymm0[5] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm10[0],ymm14[0],ymm10[1],ymm14[1],ymm10[4],ymm14[4],ymm10[5],ymm14[5] +; AVX2-NEXT: 
vunpckhps {{.*#+}} ymm0 = ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[6],ymm0[6],ymm6[7],ymm0[7] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm10[2],ymm14[2],ymm10[3],ymm14[3],ymm10[6],ymm14[6],ymm10[7],ymm14[7] ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm0[2,3],ymm6[4,5],ymm0[6,7] ; AVX2-NEXT: vmovaps 32(%rdx), %ymm6 ; AVX2-NEXT: vmovaps 32(%rcx), %ymm9 -; AVX2-NEXT: vunpckhps {{.*#+}} ymm10 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm10 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5] ; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,2,3] ; AVX2-NEXT: vmovaps 32(%rdi), %ymm14 ; AVX2-NEXT: vmovaps 32(%rsi), %ymm0 -; AVX2-NEXT: vunpckhps {{.*#+}} ymm11 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm11 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5] ; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7] ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,2,2,3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm6[2,3],ymm0[4,5],ymm6[6,7] ; AVX2-NEXT: vmovaps 96(%rdx), %ymm6 ; AVX2-NEXT: vmovaps 96(%rcx), %ymm9 -; AVX2-NEXT: vunpckhps {{.*#+}} ymm11 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm11 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5] ; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,2,3] ; AVX2-NEXT: vmovaps 96(%rdi), %ymm14 ; AVX2-NEXT: vmovaps 96(%rsi), %ymm0 -; AVX2-NEXT: vunpckhps {{.*#+}} ymm8 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm8 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5] ; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm11[2,3],ymm8[4,5],ymm11[6,7] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5] -; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7] ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,2,2,3] ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5],ymm6[6,7] @@ -1265,28 +1265,28 @@ ; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,3,3] ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7] ; AVX2-NEXT: vmovaps %ymm6, 96(%r8) -; AVX2-NEXT: vmovaps %ymm0, 448(%r8) -; AVX2-NEXT: vmovaps %ymm8, 480(%r8) -; AVX2-NEXT: vmovaps %ymm1, 192(%r8) -; AVX2-NEXT: vmovaps %ymm10, 224(%r8) -; AVX2-NEXT: vmovaps %ymm4, 320(%r8) -; AVX2-NEXT: vmovaps %ymm7, 352(%r8) +; AVX2-NEXT: vmovaps 
%ymm0, 480(%r8) +; AVX2-NEXT: vmovaps %ymm8, 448(%r8) +; AVX2-NEXT: vmovaps %ymm1, 224(%r8) +; AVX2-NEXT: vmovaps %ymm10, 192(%r8) +; AVX2-NEXT: vmovaps %ymm4, 352(%r8) +; AVX2-NEXT: vmovaps %ymm7, 320(%r8) ; AVX2-NEXT: vmovaps %ymm12, 64(%r8) ; AVX2-NEXT: vmovaps %ymm15, (%r8) ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-NEXT: vmovaps %ymm0, 32(%r8) ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 384(%r8) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-NEXT: vmovaps %ymm0, 416(%r8) -; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 128(%r8) ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX2-NEXT: vmovaps %ymm0, 384(%r8) +; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload ; AVX2-NEXT: vmovaps %ymm0, 160(%r8) ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 256(%r8) +; AVX2-NEXT: vmovaps %ymm0, 128(%r8) ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-NEXT: vmovaps %ymm0, 288(%r8) +; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX2-NEXT: vmovaps %ymm0, 256(%r8) ; AVX2-NEXT: addq $168, %rsp ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll @@ -172,117 +172,72 @@ ; SSE-LABEL: store_i32_stride6_vf4: ; SSE: # %bb.0: ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movapd (%rdi), %xmm0 -; SSE-NEXT: movapd (%rsi), %xmm9 -; SSE-NEXT: movapd (%rdx), %xmm2 -; SSE-NEXT: movapd (%rcx), %xmm8 -; SSE-NEXT: movapd (%r8), %xmm1 -; SSE-NEXT: movapd (%r9), %xmm5 -; SSE-NEXT: movapd %xmm2, %xmm6 -; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1] -; SSE-NEXT: movapd %xmm0, %xmm7 -; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1] -; SSE-NEXT: movapd %xmm7, %xmm3 -; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm6[0] -; SSE-NEXT: movapd %xmm1, %xmm4 -; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] -; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm4[1] -; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3] -; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm9[2],xmm0[3],xmm9[3] -; SSE-NEXT: movapd %xmm0, %xmm5 -; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1] -; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3] -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1] -; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[0],xmm7[1] -; SSE-NEXT: movapd %xmm4, 16(%rax) -; SSE-NEXT: movapd %xmm0, 48(%rax) -; SSE-NEXT: movaps %xmm1, 80(%rax) -; SSE-NEXT: movapd %xmm5, 64(%rax) -; SSE-NEXT: movapd %xmm6, 32(%rax) -; SSE-NEXT: movapd %xmm3, (%rax) +; SSE-NEXT: movaps (%rdx), %xmm0 +; SSE-NEXT: movaps (%rcx), %xmm1 +; SSE-NEXT: movaps (%r8), %xmm2 +; SSE-NEXT: movaps (%r9), %xmm3 +; SSE-NEXT: movaps %xmm3, %xmm4 +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm2[2,0] +; SSE-NEXT: movaps %xmm3, %xmm5 +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm2[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm2[2,3] +; SSE-NEXT: movaps %xmm3, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,0],xmm2[2,0] +; SSE-NEXT: movaps %xmm2, %xmm7 +; SSE-NEXT: movlhps 
{{.*#+}} xmm3 = xmm3[0],xmm2[0] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,0] +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,0] +; SSE-NEXT: movaps %xmm1, 48(%rax) +; SSE-NEXT: movaps %xmm0, (%rax) +; SSE-NEXT: movaps %xmm3, 16(%rax) +; SSE-NEXT: movaps %xmm7, 80(%rax) +; SSE-NEXT: movaps %xmm5, 64(%rax) +; SSE-NEXT: movaps %xmm2, 32(%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i32_stride6_vf4: ; AVX1: # %bb.0: ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovaps (%rdi), %xmm0 -; AVX1-NEXT: vmovaps (%rsi), %xmm1 -; AVX1-NEXT: vmovaps (%rdx), %xmm2 -; AVX1-NEXT: vmovaps (%rcx), %xmm3 -; AVX1-NEXT: vmovaps (%r8), %xmm4 -; AVX1-NEXT: vmovaps (%r9), %xmm5 -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm12 -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm7 -; AVX1-NEXT: vunpcklps {{.*#+}} ymm8 = ymm7[0],ymm12[0],ymm7[1],ymm12[1],ymm7[4],ymm12[4],ymm7[5],ymm12[5] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm9 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm10 -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm11 = ymm10[0],ymm9[0],ymm10[2],ymm9[2] -; AVX1-NEXT: vpermilps {{.*#+}} ymm11 = ymm11[0,2,3,1,4,6,7,5] -; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm3[0,0],xmm2[0,0] -; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0] -; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm11[0,1],ymm6[2,3],ymm11[4,5,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5],ymm6[6,7] -; AVX1-NEXT: vunpckhps {{.*#+}} ymm8 = ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[6],ymm10[6],ymm9[7],ymm10[7] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm9 -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 -; AVX1-NEXT: vshufps {{.*#+}} ymm3 = ymm2[1,2],ymm9[1,2],ymm2[5,6],ymm9[5,6] -; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = ymm3[0,2,3,1,4,6,7,5] -; AVX1-NEXT: vunpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5],ymm3[6,7] -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm12[1],ymm7[1],ymm12[3],ymm7[3] -; AVX1-NEXT: vpermilps {{.*#+}} ymm4 = ymm4[0,2,3,1,4,6,7,5] -; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7] -; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm2[3,0],ymm9[3,0],ymm2[7,4],ymm9[7,4] -; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7] -; AVX1-NEXT: vmovaps %ymm0, 64(%rax) -; AVX1-NEXT: vmovaps %ymm3, 32(%rax) -; AVX1-NEXT: vmovaps %ymm6, (%rax) +; AVX1-NEXT: vmovaps (%rdx), %xmm0 +; AVX1-NEXT: vmovaps (%r8), %xmm1 +; AVX1-NEXT: vmovaps (%r9), %xmm2 +; AVX1-NEXT: vinsertf128 $1, (%rcx), %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm3 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm4 +; AVX1-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm3[1],ymm4[1],ymm3[3],ymm4[3] +; AVX1-NEXT: vpermilps {{.*#+}} ymm5 = ymm5[0,2,3,1,4,6,7,5] +; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3,4,5],ymm5[6,7] +; AVX1-NEXT: vunpcklps {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5] +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7] +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vmovaps %ymm0, 32(%rax) +; AVX1-NEXT: vmovaps %ymm3, (%rax) +; AVX1-NEXT: vmovaps %ymm5, 64(%rax) ; AVX1-NEXT: vzeroupper ; 
AVX1-NEXT: retq ; ; AVX2-LABEL: store_i32_stride6_vf4: ; AVX2: # %bb.0: ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-NEXT: vmovaps (%rdi), %xmm0 -; AVX2-NEXT: vmovaps (%rsi), %xmm1 -; AVX2-NEXT: vmovaps (%rdx), %xmm2 -; AVX2-NEXT: vmovaps (%r8), %xmm3 -; AVX2-NEXT: vmovaps (%r9), %xmm4 -; AVX2-NEXT: vmovaps {{.*#+}} xmm5 = -; AVX2-NEXT: vinsertf128 $1, (%rcx), %ymm2, %ymm2 -; AVX2-NEXT: vpermps %ymm2, %ymm5, %ymm5 -; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm6 -; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm7 = [0,4,1,5,0,4,1,5] -; AVX2-NEXT: # ymm7 = mem[0,1,0,1] -; AVX2-NEXT: vpermps %ymm6, %ymm7, %ymm7 -; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5,6,7] -; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm7 -; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm8 = [17179869184,17179869184,17179869184,17179869184] -; AVX2-NEXT: vpermps %ymm7, %ymm8, %ymm8 -; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5],ymm5[6,7] -; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] -; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [1,5,2,6,1,5,2,6] -; AVX2-NEXT: # ymm4 = mem[0,1,0,1] -; AVX2-NEXT: vpermps %ymm2, %ymm4, %ymm4 -; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6,7] -; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [25769803778,25769803778,25769803778,25769803778] -; AVX2-NEXT: vpermps %ymm6, %ymm4, %ymm4 -; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7] -; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2,6,3,7,2,6,3,7] -; AVX2-NEXT: # ymm1 = mem[0,1,0,1] -; AVX2-NEXT: vpermps %ymm7, %ymm1, %ymm1 -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7] -; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [30064771075,30064771075,30064771075,30064771075] -; AVX2-NEXT: vpermps %ymm2, %ymm1, %ymm1 -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7] -; AVX2-NEXT: vmovaps %ymm0, 64(%rax) -; AVX2-NEXT: vmovaps %ymm3, 32(%rax) -; AVX2-NEXT: vmovaps %ymm5, (%rax) +; AVX2-NEXT: vmovaps (%rdx), %xmm0 +; AVX2-NEXT: vmovaps (%r8), %xmm1 +; AVX2-NEXT: vmovaps (%r9), %xmm2 +; AVX2-NEXT: vinsertf128 $1, (%rcx), %ymm0, %ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm3 +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [17179869184,17179869184,17179869184,17179869184] +; AVX2-NEXT: vpermps %ymm3, %ymm4, %ymm4 +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7] +; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [2,6,3,7,2,6,3,7] +; AVX2-NEXT: # ymm5 = mem[0,1,0,1] +; AVX2-NEXT: vpermps %ymm3, %ymm5, %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vmovaps %ymm0, 32(%rax) +; AVX2-NEXT: vmovaps %ymm3, 64(%rax) +; AVX2-NEXT: vmovaps %ymm4, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -296,8 +251,8 @@ ; AVX512-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0 ; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX512-NEXT: vinserti32x4 $1, (%r9), %zmm2, %zmm1 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [2,6,19,23,27,31,3,7] -; AVX512-NEXT: vpermi2d %zmm0, %zmm1, %zmm2 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [18,22,3,7,11,15,19,23] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,4,8,12,16,20,1,5,9,13,17,21,2,6,10,14] ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3 ; AVX512-NEXT: vmovdqu64 %zmm3, (%rax) @@ -328,231 +283,177 
@@ ; SSE-LABEL: store_i32_stride6_vf8: ; SSE: # %bb.0: ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movaps (%rdi), %xmm1 -; SSE-NEXT: movaps 16(%rdi), %xmm6 -; SSE-NEXT: movaps (%rsi), %xmm8 -; SSE-NEXT: movaps 16(%rsi), %xmm12 -; SSE-NEXT: movaps (%rdx), %xmm14 -; SSE-NEXT: movaps 16(%rdx), %xmm5 -; SSE-NEXT: movaps (%rcx), %xmm9 -; SSE-NEXT: movaps 16(%rcx), %xmm13 -; SSE-NEXT: movaps (%r8), %xmm7 -; SSE-NEXT: movaps 16(%r8), %xmm3 -; SSE-NEXT: movaps (%r9), %xmm11 -; SSE-NEXT: movaps 16(%r9), %xmm15 -; SSE-NEXT: movaps %xmm3, %xmm2 -; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm15[2],xmm2[3],xmm15[3] -; SSE-NEXT: movaps %xmm5, %xmm4 -; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm13[2],xmm4[3],xmm13[3] -; SSE-NEXT: movaps %xmm4, %xmm10 -; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm2[1] -; SSE-NEXT: movaps %xmm6, %xmm0 -; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3] +; SSE-NEXT: movapd (%r8), %xmm1 +; SSE-NEXT: movaps 16(%r8), %xmm4 +; SSE-NEXT: movaps (%r9), %xmm7 +; SSE-NEXT: movaps 16(%r9), %xmm0 +; SSE-NEXT: movaps %xmm0, %xmm8 +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,0],xmm4[2,0] +; SSE-NEXT: movaps %xmm0, %xmm3 +; SSE-NEXT: movaps %xmm0, %xmm9 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0] -; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1] -; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1] -; SSE-NEXT: movaps %xmm5, %xmm13 -; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm3[1] -; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1] -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm5[0] -; SSE-NEXT: movaps %xmm7, %xmm15 -; SSE-NEXT: unpckhps {{.*#+}} xmm15 = xmm15[2],xmm11[2],xmm15[3],xmm11[3] -; SSE-NEXT: movaps %xmm14, %xmm5 -; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3] -; SSE-NEXT: movaps %xmm5, %xmm12 -; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm15[1] -; SSE-NEXT: movaps %xmm1, %xmm4 -; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm8[2],xmm4[3],xmm8[3] -; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm4[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0] -; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1] -; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm9[0],xmm14[1],xmm9[1] -; SSE-NEXT: movaps %xmm14, %xmm5 -; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm7[1] -; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm14[0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[2,3] +; SSE-NEXT: movaps %xmm7, %xmm11 +; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[3,0],xmm4[2,0] +; SSE-NEXT: movaps %xmm4, %xmm10 +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm4[2,3] +; SSE-NEXT: movaps %xmm4, 48(%rax) +; SSE-NEXT: movaps %xmm4, 144(%rax) +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,0] +; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm1[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm1[2,0] +; SSE-NEXT: movaps %xmm7, %xmm2 +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm1[2,0] +; SSE-NEXT: movaps %xmm1, %xmm5 ; SSE-NEXT: movaps %xmm1, (%rax) -; 
SSE-NEXT: movaps %xmm7, 16(%rax) +; SSE-NEXT: movaps %xmm1, 96(%rax) +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm9[2,0] +; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm11[2,0] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm7[2,0] +; SSE-NEXT: movaps %xmm6, 16(%rax) ; SSE-NEXT: movaps %xmm5, 32(%rax) -; SSE-NEXT: movaps %xmm4, 48(%rax) -; SSE-NEXT: movaps %xmm15, 64(%rax) -; SSE-NEXT: movaps %xmm12, 80(%rax) -; SSE-NEXT: movaps %xmm6, 96(%rax) -; SSE-NEXT: movaps %xmm3, 112(%rax) -; SSE-NEXT: movaps %xmm13, 128(%rax) -; SSE-NEXT: movaps %xmm0, 144(%rax) -; SSE-NEXT: movaps %xmm2, 160(%rax) -; SSE-NEXT: movaps %xmm10, 176(%rax) +; SSE-NEXT: movaps %xmm2, 64(%rax) +; SSE-NEXT: movaps %xmm10, 80(%rax) +; SSE-NEXT: movaps %xmm0, 112(%rax) +; SSE-NEXT: movaps %xmm1, 128(%rax) +; SSE-NEXT: movaps %xmm3, 160(%rax) +; SSE-NEXT: movaps %xmm4, 176(%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i32_stride6_vf8: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps (%rdi), %ymm9 -; AVX1-NEXT: vmovaps (%rsi), %ymm11 -; AVX1-NEXT: vmovaps (%rdx), %ymm12 -; AVX1-NEXT: vmovaps (%rcx), %ymm1 -; AVX1-NEXT: vunpcklps {{.*#+}} ymm4 = ymm9[0],ymm11[0],ymm9[1],ymm11[1],ymm9[4],ymm11[4],ymm9[5],ymm11[5] -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3] -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm1[0],ymm12[0],ymm1[2],ymm12[2] -; AVX1-NEXT: vpermilps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4] -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm5 -; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7] -; AVX1-NEXT: vmovaps (%rcx), %xmm6 -; AVX1-NEXT: vmovaps (%rdx), %xmm7 -; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm7[1,2],xmm6[1,2] -; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,2,1,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm5, %ymm5 -; AVX1-NEXT: vmovaps (%r9), %xmm4 -; AVX1-NEXT: vmovaps (%r8), %xmm2 -; AVX1-NEXT: vunpcklps {{.*#+}} xmm10 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] -; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7] -; AVX1-NEXT: vmovaps (%rsi), %xmm3 -; AVX1-NEXT: vmovaps (%rdi), %xmm0 -; AVX1-NEXT: vunpckhps {{.*#+}} xmm13 = xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; AVX1-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm14 -; AVX1-NEXT: vblendps {{.*#+}} ymm14 = ymm5[0,1,2,3],ymm14[4,5],ymm5[6,7] -; AVX1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] -; AVX1-NEXT: vbroadcastss (%rcx), %xmm3 -; AVX1-NEXT: vbroadcastss (%rdx), %xmm5 -; AVX1-NEXT: vunpcklps {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7] -; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm2, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7] -; AVX1-NEXT: vmovaps (%r8), %ymm3 -; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm6[2],xmm7[3],xmm6[3] -; AVX1-NEXT: vmovaps (%r9), %ymm5 -; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5],ymm2[6,7] -; AVX1-NEXT: vunpcklps {{.*#+}} ymm4 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5] -; AVX1-NEXT: vunpckhps {{.*#+}} ymm3 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7] -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3] -; AVX1-NEXT: vunpckhps {{.*#+}} ymm5 = 
ymm9[2],ymm11[2],ymm9[3],ymm11[3],ymm9[6],ymm11[6],ymm9[7],ymm11[7] -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2,3],ymm3[4,5,6,7] -; AVX1-NEXT: vshufps {{.*#+}} ymm6 = ymm1[3,0],ymm12[3,0],ymm1[7,4],ymm12[7,4] -; AVX1-NEXT: vpermilps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5],ymm3[6,7] -; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,2],ymm1[1,2],ymm12[5,6],ymm1[5,6] -; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm4[4,5],ymm8[6,7] -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3] -; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3],ymm1[4,5,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5],ymm1[6,7] ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovaps %ymm1, 128(%rax) -; AVX1-NEXT: vmovaps %ymm3, 160(%rax) -; AVX1-NEXT: vmovaps %ymm2, 64(%rax) -; AVX1-NEXT: vmovaps %ymm0, (%rax) -; AVX1-NEXT: vmovaps %ymm14, 32(%rax) -; AVX1-NEXT: vmovaps %ymm6, 96(%rax) +; AVX1-NEXT: vmovaps (%r8), %ymm0 +; AVX1-NEXT: vmovaps (%r9), %xmm1 +; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[0,2,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = mem[2,3,2,3] +; AVX1-NEXT: vpermilps {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7] +; AVX1-NEXT: vbroadcastss 16(%r9), %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7] +; AVX1-NEXT: vbroadcastss 20(%r9), %ymm4 +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3],ymm0[4,5,6,7] +; AVX1-NEXT: vbroadcastss (%r9), %ymm5 +; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3,4],ymm5[5],ymm0[6,7] +; AVX1-NEXT: vbroadcastss 4(%r9), %ymm6 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3],ymm0[4,5,6,7] +; AVX1-NEXT: vmovaps %ymm0, 32(%rax) +; AVX1-NEXT: vmovaps %ymm5, (%rax) +; AVX1-NEXT: vmovaps %ymm4, 128(%rax) +; AVX1-NEXT: vmovaps %ymm3, 96(%rax) +; AVX1-NEXT: vmovaps %ymm2, 160(%rax) +; AVX1-NEXT: vmovaps %ymm1, 64(%rax) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; -; AVX2-LABEL: store_i32_stride6_vf8: -; AVX2: # %bb.0: -; AVX2-NEXT: vmovdqa (%rdi), %ymm10 -; AVX2-NEXT: vmovdqa (%rsi), %ymm11 -; AVX2-NEXT: vmovdqa (%rdx), %ymm12 -; AVX2-NEXT: vmovaps (%r9), %xmm3 -; AVX2-NEXT: vmovaps (%r8), %xmm4 -; AVX2-NEXT: vunpcklps {{.*#+}} xmm9 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] -; AVX2-NEXT: vmovaps (%rcx), %xmm0 -; AVX2-NEXT: vpermilps {{.*#+}} xmm5 = xmm0[0,1,2,2] -; AVX2-NEXT: vmovaps (%rdx), %xmm1 -; AVX2-NEXT: vpermilps {{.*#+}} xmm6 = xmm1[1,1,2,3] -; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3] -; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm9[2,3],ymm5[4,5,6,7] -; AVX2-NEXT: vmovaps (%rsi), %xmm7 -; AVX2-NEXT: vmovaps (%rdi), %xmm2 -; AVX2-NEXT: vunpckhps {{.*#+}} xmm13 = xmm2[2],xmm7[2],xmm2[3],xmm7[3] -; AVX2-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm6 -; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7] -; AVX2-NEXT: vbroadcastss (%rcx), %xmm5 -; AVX2-NEXT: vbroadcastss (%rdx), %xmm6 -; AVX2-NEXT: vunpcklps {{.*#+}} xmm14 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] -; AVX2-NEXT: vmovdqa (%rcx), %ymm6 -; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1] -; AVX2-NEXT: vmovdqa (%r8), 
%ymm5 -; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm14[2,3],ymm2[4,5,6,7] -; AVX2-NEXT: vmovdqa (%r9), %ymm7 -; AVX2-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9 -; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm2[0,1,2,3],ymm9[4,5],ymm2[6,7] -; AVX2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7] -; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3] -; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7] -; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[6],ymm11[6],ymm10[7],ymm11[7] -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7] -; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7] -; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm12[2],ymm6[2],ymm12[3],ymm6[3],ymm12[6],ymm6[6],ymm12[7],ymm6[7] -; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7] -; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero -; AVX2-NEXT: vpbroadcastd 20(%r9), %ymm4 -; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm6[0,1,2,2,4,5,6,6] -; AVX2-NEXT: vpshufd {{.*#+}} ymm13 = ymm12[1,1,2,3,5,5,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm13[0],ymm4[1],ymm13[2],ymm4[3],ymm13[4],ymm4[5],ymm13[6],ymm4[7] -; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5],ymm3[6,7] -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm3 = ymm12[0],ymm6[0],ymm12[1],ymm6[1],ymm12[4],ymm6[4],ymm12[5],ymm6[5] -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm4 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5] -; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2] -; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6,7] -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm4 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5] -; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7] -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-NEXT: vmovdqa %ymm3, 96(%rax) -; AVX2-NEXT: vmovdqa %ymm1, 128(%rax) -; AVX2-NEXT: vmovdqa %ymm2, 160(%rax) -; AVX2-NEXT: vmovaps %ymm0, 64(%rax) -; AVX2-NEXT: vmovaps %ymm9, (%rax) -; AVX2-NEXT: vmovaps %ymm8, 32(%rax) -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq +; AVX2-SLOW-LABEL: store_i32_stride6_vf8: +; AVX2-SLOW: # %bb.0: +; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm0 +; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm1 +; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[2,2,3,3] +; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1] +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7] +; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,2,2,3,4,6,6,7] +; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3] +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm3[1],ymm0[2,3,4,5,6],ymm3[7] +; AVX2-SLOW-NEXT: vbroadcastss %xmm1, %ymm1 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = 
ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7] +; AVX2-SLOW-NEXT: vbroadcastss 16(%r9), %ymm4 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7] +; AVX2-SLOW-NEXT: vbroadcastss 20(%r9), %ymm5 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3],ymm0[4,5,6,7] +; AVX2-SLOW-NEXT: vbroadcastss 4(%r9), %ymm6 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3],ymm0[4,5,6,7] +; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm5, 128(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm4, 96(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm3, 160(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm2, 64(%rax) +; AVX2-SLOW-NEXT: vzeroupper +; AVX2-SLOW-NEXT: retq +; +; AVX2-FAST-ALL-LABEL: store_i32_stride6_vf8: +; AVX2-FAST-ALL: # %bb.0: +; AVX2-FAST-ALL-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-FAST-ALL-NEXT: vmovaps (%r8), %ymm0 +; AVX2-FAST-ALL-NEXT: vmovaps (%r9), %ymm1 +; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm2 = +; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm2, %ymm2 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7] +; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [2,2,3,3,2,2,3,3] +; AVX2-FAST-ALL-NEXT: # ymm3 = mem[0,1,0,1] +; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm3, %ymm1 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 16(%r9), %ymm3 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 20(%r9), %ymm4 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3],ymm0[4,5,6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss (%r9), %ymm5 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3,4],ymm5[5],ymm0[6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 4(%r9), %ymm6 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3],ymm0[4,5,6,7] +; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 32(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm5, (%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm4, 128(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm3, 96(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm1, 64(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm2, 160(%rax) +; AVX2-FAST-ALL-NEXT: vzeroupper +; AVX2-FAST-ALL-NEXT: retq +; +; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf8: +; AVX2-FAST-PERLANE: # %bb.0: +; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm0 +; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm1 +; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[2,2,3,3] +; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1] +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6],ymm2[7] +; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,2,2,3,4,6,6,7] +; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,2,3] +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm3[1],ymm0[2,3,4,5,6],ymm3[7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm1, %ymm1 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%r9), %ymm4 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 20(%r9), %ymm5 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3],ymm0[4,5,6,7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 4(%r9), %ymm6 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3],ymm0[4,5,6,7] +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%rax) 
+; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 128(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, 96(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 160(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 64(%rax) +; AVX2-FAST-PERLANE-NEXT: vzeroupper +; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512-LABEL: store_i32_stride6_vf8: ; AVX512: # %bb.0: ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512-NEXT: vmovdqa (%rdx), %ymm1 -; AVX512-NEXT: vmovdqa (%r8), %ymm2 -; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1 -; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,8,16,24,u,u,1,9,17,25,u,u,2,10,18,26> +; AVX512-NEXT: vmovdqa (%rdx), %ymm0 +; AVX512-NEXT: vmovdqa (%r8), %ymm1 +; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm0, %zmm0 +; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm1, %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,16,24,6,7,8,9,17,25,12,13,14,15] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [18,26,2,3,4,5,19,27,8,9,10,11,20,28,14,15] ; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3 -; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,16,24,6,7,8,9,17,25,12,13,14,15] -; AVX512-NEXT: vpermi2d %zmm2, %zmm3, %zmm4 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <2,10,19,27,u,u,3,11,20,28,u,u,4,12,21,29> -; AVX512-NEXT: vpermi2d %zmm0, %zmm2, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,19,27,6,7,8,9,20,28,12,13,14,15] -; AVX512-NEXT: vpermi2d %zmm1, %zmm3, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <5,13,21,29,u,u,6,14,22,30,u,u,7,15,23,31> -; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,22,30,6,7,8,9,23,31,12,13,14,15] -; AVX512-NEXT: vpermi2d %zmm0, %zmm3, %zmm1 -; AVX512-NEXT: vmovdqu64 %zmm1, 128(%rax) -; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rax) -; AVX512-NEXT: vmovdqu64 %zmm4, (%rax) +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,21,29,4,5,6,7,22,30,10,11,12,13,23,31] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rax) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rax) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <8 x i32>, <8 x i32>* %in.vecptr0, align 32 @@ -578,564 +479,317 @@ define void @store_i32_stride6_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vecptr1, <16 x i32>* %in.vecptr2, <16 x i32>* %in.vecptr3, <16 x i32>* %in.vecptr4, <16 x i32>* %in.vecptr5, <96 x i32>* %out.vec) nounwind { ; SSE-LABEL: store_i32_stride6_vf16: ; SSE: # %bb.0: -; SSE-NEXT: subq $72, %rsp -; SSE-NEXT: movapd (%rdi), %xmm12 -; SSE-NEXT: movapd 16(%rdi), %xmm10 -; SSE-NEXT: movapd (%rsi), %xmm13 -; SSE-NEXT: movapd 16(%rsi), %xmm8 -; SSE-NEXT: movapd (%rdx), %xmm4 -; SSE-NEXT: movapd 16(%rdx), %xmm2 -; SSE-NEXT: movapd (%rcx), %xmm5 -; SSE-NEXT: movapd 16(%rcx), %xmm9 -; SSE-NEXT: movapd (%r8), %xmm7 -; SSE-NEXT: movapd 16(%r8), %xmm14 -; SSE-NEXT: movapd (%r9), %xmm0 -; SSE-NEXT: movapd 16(%r9), %xmm11 -; SSE-NEXT: movapd %xmm4, %xmm1 -; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSE-NEXT: movapd %xmm12, %xmm3 -; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1] -; SSE-NEXT: movapd %xmm3, %xmm6 -; SSE-NEXT: unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm1[0] -; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movapd %xmm7, %xmm6 -; SSE-NEXT: unpcklps {{.*#+}} xmm6 = 
xmm6[0],xmm0[0],xmm6[1],xmm0[1] -; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm6[0],xmm3[1] -; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm1[1],xmm6[1] -; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] -; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3] -; SSE-NEXT: movapd %xmm12, %xmm1 -; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm4[0] -; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill -; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm0[2],xmm7[3],xmm0[3] -; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm7[0],xmm12[1] -; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movhlps {{.*#+}} xmm7 = xmm4[1],xmm7[1] -; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movapd %xmm2, %xmm0 -; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1] -; SSE-NEXT: movapd %xmm10, %xmm3 -; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1] -; SSE-NEXT: movapd %xmm3, %xmm1 -; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movapd %xmm14, %xmm1 -; SSE-NEXT: movapd %xmm14, %xmm13 -; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm11[0],xmm13[1],xmm11[1] -; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm13[0],xmm3[1] -; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movhlps {{.*#+}} xmm13 = xmm0[1],xmm13[1] -; SSE-NEXT: movapd 32(%rdi), %xmm14 -; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3] -; SSE-NEXT: movapd 32(%rdx), %xmm0 -; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm8[2],xmm10[3],xmm8[3] -; SSE-NEXT: movapd 32(%rcx), %xmm3 -; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3] -; SSE-NEXT: movapd %xmm10, %xmm4 -; SSE-NEXT: unpcklpd {{.*#+}} xmm4 = xmm4[0],xmm2[0] -; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm1[0],xmm10[1] -; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1] -; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movapd %xmm0, %xmm2 -; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; SSE-NEXT: movapd 32(%rsi), %xmm1 -; SSE-NEXT: movapd %xmm14, %xmm8 -; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1] -; SSE-NEXT: movapd %xmm8, %xmm4 -; SSE-NEXT: unpcklpd {{.*#+}} xmm4 = xmm4[0],xmm2[0] -; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movapd 32(%r8), %xmm4 -; SSE-NEXT: movapd 32(%r9), %xmm6 -; SSE-NEXT: movapd %xmm4, %xmm5 -; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] -; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1] -; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm2[1],xmm5[1] -; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE-NEXT: unpckhps {{.*#+}} xmm14 = xmm14[2],xmm1[2],xmm14[3],xmm1[3] -; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm6[2],xmm4[3],xmm6[3] -; SSE-NEXT: movapd %xmm14, %xmm15 -; SSE-NEXT: unpcklpd {{.*#+}} xmm15 = xmm15[0],xmm0[0] -; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm4[0],xmm14[1] -; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm0[1],xmm4[1] -; SSE-NEXT: movapd 48(%rdx), %xmm3 -; SSE-NEXT: movapd 48(%rcx), %xmm10 -; SSE-NEXT: movapd %xmm3, %xmm6 -; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm10[0],xmm6[1],xmm10[1] -; SSE-NEXT: movapd 
48(%rdi), %xmm2 -; SSE-NEXT: movapd 48(%rsi), %xmm12 -; SSE-NEXT: movapd %xmm2, %xmm7 -; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1] -; SSE-NEXT: movapd %xmm7, %xmm9 -; SSE-NEXT: unpcklpd {{.*#+}} xmm9 = xmm9[0],xmm6[0] -; SSE-NEXT: movapd 48(%r8), %xmm0 -; SSE-NEXT: movapd 48(%r9), %xmm11 -; SSE-NEXT: movapd %xmm0, %xmm1 -; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1] -; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1] -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm6[1],xmm1[1] -; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3] -; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm12[2],xmm2[3],xmm12[3] -; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3] -; SSE-NEXT: movapd %xmm2, %xmm6 -; SSE-NEXT: unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm3[0] -; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1] -; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm3[1],xmm0[1] ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movaps %xmm0, 368(%rax) -; SSE-NEXT: movapd %xmm2, 352(%rax) -; SSE-NEXT: movapd %xmm6, 336(%rax) -; SSE-NEXT: movaps %xmm1, 320(%rax) -; SSE-NEXT: movapd %xmm7, 304(%rax) -; SSE-NEXT: movapd %xmm9, 288(%rax) -; SSE-NEXT: movaps %xmm4, 272(%rax) -; SSE-NEXT: movapd %xmm14, 256(%rax) -; SSE-NEXT: movapd %xmm15, 240(%rax) -; SSE-NEXT: movaps %xmm5, 224(%rax) -; SSE-NEXT: movapd %xmm8, 208(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 192(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 176(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 160(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 144(%rax) -; SSE-NEXT: movaps %xmm13, 128(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 112(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 96(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 80(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 64(%rax) -; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 48(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 32(%rax) +; SSE-NEXT: movapd (%r8), %xmm5 +; SSE-NEXT: movaps 16(%r8), %xmm9 +; SSE-NEXT: movaps 32(%r8), %xmm13 +; SSE-NEXT: movaps 48(%r8), %xmm4 +; SSE-NEXT: movaps 16(%r9), %xmm15 +; SSE-NEXT: movapd 32(%r9), %xmm1 +; SSE-NEXT: movaps 48(%r9), %xmm0 +; SSE-NEXT: movaps %xmm0, %xmm8 +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,0],xmm4[2,0] +; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm5, %xmm2 +; SSE-NEXT: movapd %xmm1, %xmm10 +; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm5[0],xmm10[1] +; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm5[2,3] +; SSE-NEXT: movaps %xmm5, %xmm7 +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm0[1,0] +; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[2,3] +; SSE-NEXT: movaps %xmm15, %xmm14 +; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[3,0],xmm4[2,0] +; SSE-NEXT: movaps %xmm4, %xmm3 +; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm15[0] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[2,3] +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = 
xmm6[0,1],xmm8[2,0] +; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm7[0,2] +; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm1, %xmm7 +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,0],xmm9[2,0] +; SSE-NEXT: movaps %xmm9, %xmm12 +; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,1],xmm7[2,0] +; SSE-NEXT: movapd %xmm1, %xmm7 +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm13[2,0] +; SSE-NEXT: movaps %xmm13, %xmm8 +; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm7[2,0] +; SSE-NEXT: movaps (%r9), %xmm7 +; SSE-NEXT: movaps %xmm4, 48(%rax) +; SSE-NEXT: movaps %xmm4, 240(%rax) +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm14[2,0] +; SSE-NEXT: movaps %xmm15, %xmm14 +; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,0],xmm5[2,0] +; SSE-NEXT: movaps %xmm9, %xmm4 +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm7[3,0] +; SSE-NEXT: movaps %xmm13, %xmm2 +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[1,0] +; SSE-NEXT: movaps %xmm9, %xmm11 +; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm7[0] +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[2,3] +; SSE-NEXT: movaps %xmm5, (%rax) +; SSE-NEXT: movaps %xmm5, 192(%rax) +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm15[2,0] +; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm9[0] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm9[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm9[2,3] +; SSE-NEXT: movaps %xmm9, 144(%rax) +; SSE-NEXT: movaps %xmm9, 336(%rax) +; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm4[0,2] +; SSE-NEXT: movapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload +; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm13[0],xmm4[1] +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm13[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm13[0,0] +; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm13[2,3] +; SSE-NEXT: movaps %xmm13, 96(%rax) +; SSE-NEXT: movaps %xmm13, 288(%rax) +; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm2[0,2] +; SSE-NEXT: movaps %xmm11, 16(%rax) +; SSE-NEXT: movaps %xmm13, 32(%rax) +; SSE-NEXT: movaps %xmm7, 64(%rax) +; SSE-NEXT: movaps %xmm9, 80(%rax) +; SSE-NEXT: movaps %xmm3, 112(%rax) +; SSE-NEXT: movaps %xmm5, 128(%rax) +; SSE-NEXT: movaps %xmm14, 160(%rax) +; SSE-NEXT: movaps %xmm6, 176(%rax) +; SSE-NEXT: movaps %xmm1, 208(%rax) +; SSE-NEXT: movaps %xmm8, 224(%rax) +; SSE-NEXT: movaps %xmm10, 256(%rax) +; SSE-NEXT: movaps %xmm12, 272(%rax) +; SSE-NEXT: movaps %xmm0, 304(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 16(%rax) +; SSE-NEXT: movaps %xmm0, 320(%rax) +; SSE-NEXT: movaps %xmm4, 352(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, (%rax) -; SSE-NEXT: addq $72, %rsp +; SSE-NEXT: movaps %xmm0, 368(%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i32_stride6_vf16: ; AVX1: # %bb.0: -; AVX1-NEXT: subq $312, %rsp # imm = 0x138 -; AVX1-NEXT: vmovaps (%rdi), %ymm2 -; AVX1-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovaps 32(%rdi), %ymm1 -; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovaps 32(%rsi), %ymm0 -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovaps 32(%rdx), %ymm12 -; AVX1-NEXT: vmovaps 32(%rcx), %ymm6 -; AVX1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5] -; 
AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm6[0],ymm12[0],ymm6[2],ymm12[2] -; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4] -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovaps 32(%rcx), %xmm1 -; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovaps 32(%rdx), %xmm0 -; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2] -; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps 32(%r9), %xmm15 -; AVX1-NEXT: vmovaps 32(%r8), %xmm13 -; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm13[0],xmm15[0],xmm13[1],xmm15[1] -; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vmovaps 32(%rsi), %xmm0 -; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovaps 32(%rdi), %xmm14 -; AVX1-NEXT: vunpckhps {{.*#+}} xmm9 = xmm14[2],xmm0[2],xmm14[3],xmm0[3] -; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm8 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5],ymm7[6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovaps (%rsi), %ymm0 -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vunpcklps {{.*#+}} ymm7 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5] -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm7[2,3,2,3] -; AVX1-NEXT: vmovaps (%rdx), %ymm7 -; AVX1-NEXT: vmovaps (%rcx), %ymm8 -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm11 = ymm8[0],ymm7[0],ymm8[2],ymm7[2] -; AVX1-NEXT: vpermilps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4] -; AVX1-NEXT: vextractf128 $1, %ymm11, %xmm11 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovaps (%rcx), %xmm0 -; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovaps (%rdx), %xmm1 -; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm1[1,2],xmm0[1,2] -; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[0,2,1,3] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4 -; AVX1-NEXT: vmovaps (%r9), %xmm11 -; AVX1-NEXT: vmovaps (%r8), %xmm10 -; AVX1-NEXT: vunpcklps {{.*#+}} xmm5 = xmm10[0],xmm11[0],xmm10[1],xmm11[1] -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7] -; AVX1-NEXT: vmovaps (%rsi), %xmm2 -; AVX1-NEXT: vmovaps (%rdi), %xmm1 -; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm4 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7] -; AVX1-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; AVX1-NEXT: vbroadcastss (%rcx), %xmm1 -; AVX1-NEXT: vbroadcastss (%rdx), %xmm3 -; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm1 -; AVX1-NEXT: vblendps 
{{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vmovaps 32(%r8), %ymm0 -; AVX1-NEXT: vmovaps 32(%r9), %ymm1 -; AVX1-NEXT: vunpcklps {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] -; AVX1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload -; AVX1-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload -; AVX1-NEXT: # ymm4 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7] -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm1 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm6[3,0],ymm12[3,0],ymm6[7,4],ymm12[7,4] -; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7] -; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm12[1,2],ymm6[1,2],ymm12[5,6],ymm6[5,6] -; AVX1-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm2 # 32-byte Folded Reload -; AVX1-NEXT: # ymm2 = mem[0,1,2,3],ymm3[4,5],mem[6,7] -; AVX1-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3] -; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7] -; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm13[2],xmm15[2],xmm13[3],xmm15[3] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm9[2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload -; AVX1-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm3 # 16-byte Folded Reload -; AVX1-NEXT: # xmm3 = xmm2[2],mem[2],xmm2[3],mem[3] -; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7] -; AVX1-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm3 # 16-byte Folded Reload -; AVX1-NEXT: # xmm3 = xmm14[0],mem[0],xmm14[1],mem[1] -; AVX1-NEXT: vbroadcastss 32(%rcx), %xmm4 -; AVX1-NEXT: vbroadcastss 32(%rdx), %xmm5 -; AVX1-NEXT: vunpcklps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7] -; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload -; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7] -; AVX1-NEXT: vmovaps (%r8), %ymm4 -; AVX1-NEXT: vmovaps (%r9), %ymm5 -; AVX1-NEXT: vunpcklps {{.*#+}} ymm9 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5] -; AVX1-NEXT: vunpckhps {{.*#+}} ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7] -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload -; AVX1-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload -; AVX1-NEXT: # ymm5 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7] -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3] -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm12 -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3],ymm4[4,5,6,7] -; AVX1-NEXT: vshufps {{.*#+}} ymm12 = 
ymm8[3,0],ymm7[3,0],ymm8[7,4],ymm7[7,4] -; AVX1-NEXT: vpermilps {{.*#+}} ymm12 = ymm12[2,0,2,3,6,4,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm12[4,5],ymm4[6,7] -; AVX1-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm8[1,2],ymm7[5,6],ymm8[5,6] -; AVX1-NEXT: vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm8 # 32-byte Folded Reload -; AVX1-NEXT: # ymm8 = mem[0,1,2,3],ymm9[4,5],mem[6,7] -; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm9 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3] -; AVX1-NEXT: vpermilps {{.*#+}} ymm7 = ymm7[0,2,1,3,4,6,5,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm9[2,3],ymm7[4,5,6,7] -; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5],ymm7[6,7] -; AVX1-NEXT: vunpckhps {{.*#+}} xmm7 = xmm10[2],xmm11[2],xmm10[3],xmm11[3] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm7, %ymm7 -; AVX1-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload -; AVX1-NEXT: # ymm7 = ymm7[0,1],mem[2,3],ymm7[4,5,6,7] -; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload -; AVX1-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload -; AVX1-NEXT: # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3] -; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[2,3,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5],ymm7[6,7] ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovaps %ymm2, 64(%rax) -; AVX1-NEXT: vmovaps %ymm5, 128(%rax) -; AVX1-NEXT: vmovaps %ymm4, 160(%rax) -; AVX1-NEXT: vmovaps %ymm3, 192(%rax) +; AVX1-NEXT: vmovaps (%r8), %ymm2 +; AVX1-NEXT: vmovaps 32(%r8), %ymm3 +; AVX1-NEXT: vmovaps (%r9), %xmm1 +; AVX1-NEXT: vmovaps 32(%r9), %xmm0 +; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm0[0,2,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6],ymm0[7] +; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm1[0,2,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6],ymm1[7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = mem[2,3,2,3] +; AVX1-NEXT: vpermilps {{.*#+}} ymm4 = ymm4[0,2,2,3,4,6,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0],ymm4[1],ymm3[2,3,4,5,6],ymm4[7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = mem[2,3,2,3] +; AVX1-NEXT: vpermilps {{.*#+}} ymm5 = ymm5[0,2,2,3,4,6,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm3[0],ymm5[1],ymm3[2,3,4,5,6],ymm5[7] +; AVX1-NEXT: vbroadcastss (%r9), %ymm6 +; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3,4],ymm6[5],ymm2[6,7] +; AVX1-NEXT: vbroadcastss 52(%r9), %ymm7 +; AVX1-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2],ymm7[3],ymm2[4,5,6,7] +; AVX1-NEXT: vbroadcastss 48(%r9), %ymm8 +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3,4],ymm8[5],ymm3[6,7] +; AVX1-NEXT: vbroadcastss 32(%r9), %ymm9 +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm2[0,1,2,3,4],ymm9[5],ymm2[6,7] +; AVX1-NEXT: vbroadcastss 36(%r9), %ymm10 +; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm3[0,1,2],ymm10[3],ymm3[4,5,6,7] +; AVX1-NEXT: vbroadcastss 20(%r9), %ymm11 +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm11[3],ymm2[4,5,6,7] +; AVX1-NEXT: vbroadcastss 16(%r9), %ymm11 +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3,4],ymm11[5],ymm3[6,7] +; AVX1-NEXT: vbroadcastss 4(%r9), %ymm12 +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm12[3],ymm3[4,5,6,7] +; AVX1-NEXT: vmovaps %ymm3, 32(%rax) +; AVX1-NEXT: vmovaps %ymm11, 96(%rax) +; AVX1-NEXT: vmovaps %ymm2, 128(%rax) +; AVX1-NEXT: vmovaps %ymm10, 224(%rax) +; 
AVX1-NEXT: vmovaps %ymm9, 192(%rax) +; AVX1-NEXT: vmovaps %ymm8, 288(%rax) +; AVX1-NEXT: vmovaps %ymm7, 320(%rax) +; AVX1-NEXT: vmovaps %ymm6, (%rax) +; AVX1-NEXT: vmovaps %ymm5, 160(%rax) +; AVX1-NEXT: vmovaps %ymm4, 352(%rax) +; AVX1-NEXT: vmovaps %ymm1, 64(%rax) ; AVX1-NEXT: vmovaps %ymm0, 256(%rax) -; AVX1-NEXT: vmovaps %ymm6, 320(%rax) -; AVX1-NEXT: vmovaps %ymm1, 352(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, (%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 32(%rax) -; AVX1-NEXT: vmovaps %ymm8, 96(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 224(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 288(%rax) -; AVX1-NEXT: addq $312, %rsp # imm = 0x138 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; -; AVX2-LABEL: store_i32_stride6_vf16: -; AVX2: # %bb.0: -; AVX2-NEXT: subq $232, %rsp -; AVX2-NEXT: vmovaps (%r9), %xmm13 -; AVX2-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-NEXT: vmovdqa 32(%r9), %xmm12 -; AVX2-NEXT: vmovaps (%r8), %xmm14 -; AVX2-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-NEXT: vmovdqa 32(%r8), %xmm10 -; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm10[0],xmm12[0],xmm10[1],xmm12[1] -; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vmovaps (%rcx), %xmm6 -; AVX2-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-NEXT: vmovdqa 32(%rcx), %xmm8 -; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm8[0,1,2,2] -; AVX2-NEXT: vmovaps (%rdx), %xmm7 -; AVX2-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-NEXT: vmovdqa 32(%rdx), %xmm4 -; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm4[1,1,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3] -; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1] -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5,6,7] -; AVX2-NEXT: vmovaps (%rsi), %xmm11 -; AVX2-NEXT: vmovdqa 32(%rsi), %xmm5 -; AVX2-NEXT: vmovdqa 32(%rdi), %xmm3 -; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm5[2],xmm3[3],xmm5[3] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm9 -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5],ymm0[6,7] -; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm6[0,1,2,2] -; AVX2-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[1,1,2,3] -; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm6[0],xmm0[1],xmm6[2],xmm0[3] -; AVX2-NEXT: vunpcklps {{.*#+}} xmm6 = xmm14[0],xmm13[0],xmm14[1],xmm13[1] -; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7] -; AVX2-NEXT: vmovaps (%rdi), %xmm1 -; AVX2-NEXT: vunpckhps {{.*#+}} xmm7 = xmm1[2],xmm11[2],xmm1[3],xmm11[3] -; AVX2-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm9 -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5],ymm0[6,7] -; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vbroadcastss (%rcx), %xmm0 -; AVX2-NEXT: vbroadcastss (%rdx), %xmm7 -; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] -; AVX2-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1] -; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7] -; AVX2-NEXT: 
vinsertf128 $1, %xmm6, %ymm0, %ymm1 -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7] -; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vmovdqa 32(%r8), %ymm0 -; AVX2-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill -; AVX2-NEXT: vmovdqa 32(%r9), %ymm11 -; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm11[2],ymm0[3],ymm11[3],ymm0[6],ymm11[6],ymm0[7],ymm11[7] -; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3] -; AVX2-NEXT: vmovdqa 32(%rdi), %ymm13 -; AVX2-NEXT: vmovdqa 32(%rsi), %ymm14 -; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm6 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7] -; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm1 -; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] -; AVX2-NEXT: vmovdqa 32(%rdx), %ymm1 -; AVX2-NEXT: vmovdqa 32(%rcx), %ymm0 -; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm15 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7] -; AVX2-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[2,3,2,3,6,7,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm15[4,5],ymm7[6,7] -; AVX2-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm7 = mem[0],zero,mem[1],zero -; AVX2-NEXT: vpbroadcastd 52(%r9), %ymm15 -; AVX2-NEXT: vpblendd {{.*#+}} xmm7 = xmm7[0,1,2],xmm15[3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm15 = ymm0[0,1,2,2,4,5,6,6] -; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm1[1,1,2,3,5,5,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0],ymm15[1],ymm9[2],ymm15[3],ymm9[4],ymm15[5],ymm9[6],ymm15[7] -; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,3],ymm9[4,5,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7] -; AVX2-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm10[2],xmm12[2],xmm10[3],xmm12[3] -; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,2,1] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5,6,7] -; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm4[2],xmm8[2],xmm4[3],xmm8[3] -; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3] -; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5],ymm2[6,7] -; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vpbroadcastd 32(%rcx), %xmm2 -; AVX2-NEXT: vpbroadcastd 32(%rdx), %xmm6 -; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1] -; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1] -; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7] -; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 16-byte Folded Reload -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7] -; AVX2-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vmovdqa (%r8), %ymm3 -; AVX2-NEXT: vmovdqa (%r9), %ymm5 -; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm6 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7] -; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3] -; AVX2-NEXT: vmovdqa (%rdi), %ymm7 -; AVX2-NEXT: vmovdqa (%rsi), %ymm8 -; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm9 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7] -; AVX2-NEXT: vextracti128 $1, %ymm9, %xmm10 -; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm10[2,3],ymm6[4,5,6,7] -; AVX2-NEXT: vmovdqa (%rdx), %ymm10 -; AVX2-NEXT: vmovdqa (%rcx), %ymm12 -; AVX2-NEXT: 
vpunpckhdq {{.*#+}} ymm15 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7] -; AVX2-NEXT: vpshufd {{.*#+}} ymm15 = ymm15[2,3,2,3,6,7,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm15[4,5],ymm6[6,7] -; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm6 = mem[0],zero,mem[1],zero -; AVX2-NEXT: vpbroadcastd 20(%r9), %ymm4 -; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm6 = ymm12[0,1,2,2,4,5,6,6] -; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm10[1,1,2,3,5,5,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2],ymm6[3],ymm2[4],ymm6[5],ymm2[6],ymm6[7] -; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3],ymm2[4,5,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5],ymm2[6,7] -; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload -; AVX2-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload -; AVX2-NEXT: # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3] -; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1] -; AVX2-NEXT: vpblendd $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload -; AVX2-NEXT: # ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5,6,7] -; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload -; AVX2-NEXT: vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload -; AVX2-NEXT: # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3] -; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,3,2,3] -; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6 -; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5],ymm4[6,7] -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5] -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5] -; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2] -; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7] -; AVX2-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm11[0],ymm1[1],ymm11[1],ymm1[4],ymm11[4],ymm1[5],ymm11[5] -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7] -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[4],ymm12[4],ymm10[5],ymm12[5] -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm6 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[4],ymm8[4],ymm7[5],ymm8[5] -; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2] -; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3],ymm6[4,5,6,7] -; AVX2-NEXT: vpunpckldq {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5] -; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5],ymm1[6,7] -; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-NEXT: vmovdqa %ymm1, 96(%rax) -; AVX2-NEXT: vmovdqa %ymm0, 288(%rax) -; AVX2-NEXT: vmovdqa %ymm4, 64(%rax) -; AVX2-NEXT: vmovdqa %ymm2, 128(%rax) -; AVX2-NEXT: vmovdqa %ymm15, 160(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 192(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 256(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 320(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 352(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 
32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, (%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 32(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 224(%rax) -; AVX2-NEXT: addq $232, %rsp -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq +; AVX2-SLOW-LABEL: store_i32_stride6_vf16: +; AVX2-SLOW: # %bb.0: +; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-SLOW-NEXT: vmovaps (%r8), %ymm2 +; AVX2-SLOW-NEXT: vmovaps 32(%r8), %ymm3 +; AVX2-SLOW-NEXT: vmovaps (%r9), %xmm4 +; AVX2-SLOW-NEXT: vmovaps 32(%r9), %xmm5 +; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm5[2,2,3,3] +; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1] +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6],ymm0[7] +; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm4[2,2,3,3] +; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1] +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6],ymm1[7] +; AVX2-SLOW-NEXT: vbroadcastss %xmm4, %ymm4 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3,4],ymm4[5],ymm2[6,7] +; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm6 = mem[0,2,2,3,4,6,6,7] +; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3] +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm6 = ymm3[0],ymm6[1],ymm3[2,3,4,5,6],ymm6[7] +; AVX2-SLOW-NEXT: vbroadcastss %xmm5, %ymm5 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm2[0,1,2,3,4],ymm5[5],ymm2[6,7] +; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm7 = mem[0,2,2,3,4,6,6,7] +; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3] +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0],ymm7[1],ymm3[2,3,4,5,6],ymm7[7] +; AVX2-SLOW-NEXT: vbroadcastss 52(%r9), %ymm8 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0,1,2],ymm8[3],ymm2[4,5,6,7] +; AVX2-SLOW-NEXT: vbroadcastss 48(%r9), %ymm9 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0,1,2,3,4],ymm9[5],ymm3[6,7] +; AVX2-SLOW-NEXT: vbroadcastss 36(%r9), %ymm10 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm10 = ymm3[0,1,2],ymm10[3],ymm3[4,5,6,7] +; AVX2-SLOW-NEXT: vbroadcastss 20(%r9), %ymm11 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm11[3],ymm2[4,5,6,7] +; AVX2-SLOW-NEXT: vbroadcastss 16(%r9), %ymm11 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3,4],ymm11[5],ymm3[6,7] +; AVX2-SLOW-NEXT: vbroadcastss 4(%r9), %ymm12 +; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm12[3],ymm3[4,5,6,7] +; AVX2-SLOW-NEXT: vmovaps %ymm3, 32(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm11, 96(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm2, 128(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm10, 224(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm9, 288(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm8, 320(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm7, 160(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm5, 192(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm6, 352(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm4, (%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%rax) +; AVX2-SLOW-NEXT: vmovaps %ymm0, 256(%rax) +; AVX2-SLOW-NEXT: vzeroupper +; AVX2-SLOW-NEXT: retq +; +; AVX2-FAST-ALL-LABEL: store_i32_stride6_vf16: +; AVX2-FAST-ALL: # %bb.0: +; AVX2-FAST-ALL-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-FAST-ALL-NEXT: vmovaps (%r8), %ymm1 +; AVX2-FAST-ALL-NEXT: vmovaps 32(%r8), %ymm2 +; AVX2-FAST-ALL-NEXT: vmovaps (%r9), %ymm3 +; AVX2-FAST-ALL-NEXT: vmovaps 32(%r9), %ymm4 +; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm5 = [4,6,2,3,4,6,6,7] +; AVX2-FAST-ALL-NEXT: vpermps %ymm4, %ymm5, %ymm0 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6],ymm0[7] +; AVX2-FAST-ALL-NEXT: 
vbroadcastf128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3] +; AVX2-FAST-ALL-NEXT: # ymm6 = mem[0,1,0,1] +; AVX2-FAST-ALL-NEXT: vpermps %ymm4, %ymm6, %ymm4 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0],ymm4[1],ymm1[2,3,4,5,6],ymm4[7] +; AVX2-FAST-ALL-NEXT: vpermps %ymm3, %ymm5, %ymm5 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm5 = ymm2[0],ymm5[1],ymm2[2,3,4,5,6],ymm5[7] +; AVX2-FAST-ALL-NEXT: vpermps %ymm3, %ymm6, %ymm3 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5,6],ymm3[7] +; AVX2-FAST-ALL-NEXT: vbroadcastss (%r9), %ymm6 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3,4],ymm6[5],ymm1[6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 52(%r9), %ymm7 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2],ymm7[3],ymm1[4,5,6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 48(%r9), %ymm8 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3,4],ymm8[5],ymm2[6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 32(%r9), %ymm9 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3,4],ymm9[5],ymm1[6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 36(%r9), %ymm10 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1,2],ymm10[3],ymm2[4,5,6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 20(%r9), %ymm11 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm11[3],ymm1[4,5,6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 16(%r9), %ymm11 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3,4],ymm11[5],ymm2[6,7] +; AVX2-FAST-ALL-NEXT: vbroadcastss 4(%r9), %ymm12 +; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm12[3],ymm2[4,5,6,7] +; AVX2-FAST-ALL-NEXT: vmovaps %ymm2, 32(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm11, 96(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm1, 128(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm10, 224(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm9, 192(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm8, 288(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm7, 320(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm6, (%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm3, 64(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm5, 160(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm4, 256(%rax) +; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, 352(%rax) +; AVX2-FAST-ALL-NEXT: vzeroupper +; AVX2-FAST-ALL-NEXT: retq +; +; AVX2-FAST-PERLANE-LABEL: store_i32_stride6_vf16: +; AVX2-FAST-PERLANE: # %bb.0: +; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX2-FAST-PERLANE-NEXT: vmovaps (%r8), %ymm2 +; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r8), %ymm3 +; AVX2-FAST-PERLANE-NEXT: vmovaps (%r9), %xmm4 +; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%r9), %xmm5 +; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm0 = xmm5[2,2,3,3] +; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1] +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6],ymm0[7] +; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm1 = xmm4[2,2,3,3] +; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1] +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6],ymm1[7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm4, %ymm4 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3,4],ymm4[5],ymm2[6,7] +; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm6 = mem[0,2,2,3,4,6,6,7] +; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3] +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm6 = ymm3[0],ymm6[1],ymm3[2,3,4,5,6],ymm6[7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss %xmm5, %ymm5 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm5 = ymm2[0,1,2,3,4],ymm5[5],ymm2[6,7] +; AVX2-FAST-PERLANE-NEXT: vpermilps 
{{.*#+}} ymm7 = mem[0,2,2,3,4,6,6,7] +; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,2,3] +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm7 = ymm3[0],ymm7[1],ymm3[2,3,4,5,6],ymm7[7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 52(%r9), %ymm8 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0,1,2],ymm8[3],ymm2[4,5,6,7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 48(%r9), %ymm9 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0,1,2,3,4],ymm9[5],ymm3[6,7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 36(%r9), %ymm10 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm10 = ymm3[0,1,2],ymm10[3],ymm3[4,5,6,7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 20(%r9), %ymm11 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm11[3],ymm2[4,5,6,7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 16(%r9), %ymm11 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3,4],ymm11[5],ymm3[6,7] +; AVX2-FAST-PERLANE-NEXT: vbroadcastss 4(%r9), %ymm12 +; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm12[3],ymm3[4,5,6,7] +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm3, 32(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, 96(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 128(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm10, 224(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 288(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm8, 320(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm7, 160(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 192(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 352(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, (%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%rax) +; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 256(%rax) +; AVX2-FAST-PERLANE-NEXT: vzeroupper +; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512-LABEL: store_i32_stride6_vf16: ; AVX512: # %bb.0: -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm1 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm2 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm3 -; AVX512-NEXT: vmovdqu64 (%rcx), %zmm4 -; AVX512-NEXT: vmovdqu64 (%r8), %zmm6 -; AVX512-NEXT: vmovdqu64 (%r9), %zmm7 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26] -; AVX512-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm4, %zmm3, %zmm5 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25] -; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm0 -; AVX512-NEXT: movb $-110, %al -; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: vmovdqa64 %zmm5, %zmm0 {%k1} -; AVX512-NEXT: vpunpckldq {{.*#+}} zmm5 = zmm6[0],zmm7[0],zmm6[1],zmm7[1],zmm6[4],zmm7[4],zmm6[5],zmm7[5],zmm6[8],zmm7[8],zmm6[9],zmm7[9],zmm6[12],zmm7[12],zmm6[13],zmm7[13] -; AVX512-NEXT: movb $36, %al -; AVX512-NEXT: kmovd %eax, %k2 -; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm5[0,1,4,5,4,5,0,1] -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [2,18,0,0,4,20,3,19,2,18,0,0,4,20,3,19] -; AVX512-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm7, %zmm6, %zmm5 -; AVX512-NEXT: vmovdqa64 %zmm8, %zmm5 {%k1} -; AVX512-NEXT: vmovdqa (%rdx), %ymm8 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [3,11,0,8,7,15,4,12] -; AVX512-NEXT: vpermi2d (%rcx), %ymm8, %ymm9 -; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm9[0,1,0,1,2,3,0,1] -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = 
[6,22,5,21,0,0,7,23,6,22,5,21,0,0,7,23] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm7, %zmm6, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22] -; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm4, %zmm3, %zmm9 -; AVX512-NEXT: vmovdqa64 %zmm8, %zmm9 {%k1} -; AVX512-NEXT: vmovdqa (%rdi), %ymm8 -; AVX512-NEXT: vpunpckhdq {{.*#+}} ymm8 = ymm8[2],mem[2],ymm8[3],mem[3],ymm8[6],mem[6],ymm8[7],mem[7] -; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm8[0,1,2,3,2,3,0,1] -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [14,30,13,29,0,0,15,31,14,30,13,29,0,0,15,31] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm7, %zmm6, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30] -; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm4, %zmm3, %zmm10 -; AVX512-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1} -; AVX512-NEXT: vpunpckhdq {{.*#+}} zmm8 = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15] -; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm8[0,1,6,7,6,7,0,1] -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29] -; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm8 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [10,26,0,0,12,28,11,27,10,26,0,0,12,28,11,27] -; AVX512-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm7, %zmm6, %zmm11 -; AVX512-NEXT: vmovdqa64 %zmm8, %zmm11 {%k1} -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16] -; AVX512-NEXT: vpermi2d %zmm4, %zmm3, %zmm6 -; AVX512-NEXT: vmovdqa64 %zmm6, %zmm11 {%k2} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18] -; AVX512-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm4, %zmm3, %zmm6 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17] -; AVX512-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm3 -; AVX512-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1} -; AVX512-NEXT: vmovdqa (%r8), %xmm1 -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] -; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[0,1,0,1,0,1,0,1] -; AVX512-NEXT: vmovdqu64 %zmm3, (%r10) -; AVX512-NEXT: vmovdqu64 %zmm11, 256(%r10) -; AVX512-NEXT: vmovdqu64 %zmm10, 320(%r10) -; AVX512-NEXT: vmovdqu64 %zmm9, 128(%r10) -; AVX512-NEXT: vmovdqu64 %zmm5, 64(%r10) -; AVX512-NEXT: vmovdqu64 %zmm0, 192(%r10) +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX512-NEXT: vmovdqu64 (%r8), %zmm0 +; AVX512-NEXT: vmovdqu64 (%r9), %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31] +; AVX512-NEXT: vpermi2d %zmm1, 
%zmm0, %zmm7 +; AVX512-NEXT: vmovdqu64 %zmm7, 320(%rax) +; AVX512-NEXT: vmovdqu64 %zmm6, 256(%rax) +; AVX512-NEXT: vmovdqu64 %zmm5, 192(%rax) +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rax) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rax) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <16 x i32>, <16 x i32>* %in.vecptr0, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll @@ -81,70 +81,50 @@ define void @store_i64_stride3_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr1, <4 x i64>* %in.vecptr2, <12 x i64>* %out.vec) nounwind { ; SSE-LABEL: store_i64_stride3_vf4: ; SSE: # %bb.0: -; SSE-NEXT: movaps (%rdi), %xmm0 -; SSE-NEXT: movaps 16(%rdi), %xmm1 -; SSE-NEXT: movaps (%rsi), %xmm2 -; SSE-NEXT: movaps 16(%rsi), %xmm3 -; SSE-NEXT: movaps (%rdx), %xmm4 -; SSE-NEXT: movaps 16(%rdx), %xmm5 -; SSE-NEXT: movaps %xmm3, %xmm6 -; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm5[1] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0] -; SSE-NEXT: movaps %xmm2, %xmm3 -; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; SSE-NEXT: movaps %xmm0, (%rcx) -; SSE-NEXT: movaps %xmm4, 16(%rcx) -; SSE-NEXT: movaps %xmm3, 32(%rcx) -; SSE-NEXT: movaps %xmm1, 48(%rcx) -; SSE-NEXT: movaps %xmm5, 64(%rcx) -; SSE-NEXT: movaps %xmm6, 80(%rcx) +; SSE-NEXT: movapd (%rsi), %xmm0 +; SSE-NEXT: movapd 16(%rsi), %xmm1 +; SSE-NEXT: movapd (%rdx), %xmm2 +; SSE-NEXT: movapd 16(%rdx), %xmm3 +; SSE-NEXT: movapd %xmm1, %xmm4 +; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1] +; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1] +; SSE-NEXT: movapd %xmm0, (%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1] +; SSE-NEXT: movapd %xmm1, 48(%rcx) +; SSE-NEXT: movapd %xmm4, 16(%rcx) +; SSE-NEXT: movapd %xmm2, 32(%rcx) +; SSE-NEXT: movapd %xmm0, 64(%rcx) +; SSE-NEXT: movapd %xmm3, 80(%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i64_stride3_vf4: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovapd (%rdi), %ymm0 -; AVX1-NEXT: vmovapd (%rdx), %ymm1 -; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm2 -; AVX1-NEXT: vmovaps (%rdi), %xmm3 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm3[0],mem[0] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7] -; AVX1-NEXT: vmovapd 16(%rdx), %xmm3 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3] -; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm4 -; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2] -; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3] -; AVX1-NEXT: vmovapd %ymm0, 32(%rcx) -; AVX1-NEXT: vmovapd %ymm3, 64(%rcx) -; AVX1-NEXT: vmovaps %ymm2, (%rcx) +; AVX1-NEXT: vmovaps (%rsi), %ymm0 +; AVX1-NEXT: vmovaps (%rdx), %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm0[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7] +; AVX1-NEXT: vinsertf128 $1, 
(%rdx), %ymm0, %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm0[2,3],ymm3[4,5],ymm0[6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vmovaps %ymm0, 32(%rcx) +; AVX1-NEXT: vmovaps %ymm3, (%rcx) +; AVX1-NEXT: vmovaps %ymm2, 64(%rcx) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: store_i64_stride3_vf4: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovaps (%rdi), %ymm0 +; AVX2-NEXT: vmovaps (%rsi), %ymm0 ; AVX2-NEXT: vmovaps (%rdx), %ymm1 -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3] -; AVX2-NEXT: vmovaps 16(%rdx), %xmm3 -; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7] -; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm3 -; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7] -; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm3 -; AVX2-NEXT: vmovddup {{.*#+}} xmm4 = mem[0,0] -; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7] -; AVX2-NEXT: vpermilps {{.*#+}} ymm4 = mem[2,3,0,1,6,7,4,5] -; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7] -; AVX2-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-NEXT: vmovaps %ymm3, (%rcx) +; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm1[2,1,2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vbroadcastsd (%rdx), %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7] +; AVX2-NEXT: vmovaps %ymm0, (%rcx) +; AVX2-NEXT: vmovaps %ymm1, 32(%rcx) ; AVX2-NEXT: vmovaps %ymm2, 64(%rcx) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq @@ -154,8 +134,8 @@ ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 ; AVX512-NEXT: vmovdqa (%rdx), %ymm1 ; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [2,11,15,3] -; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm2 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [10,3,7,11] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,4,8,1,5,9,2,6] ; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 ; AVX512-NEXT: vmovdqu64 %zmm3, (%rcx) @@ -179,150 +159,102 @@ define void @store_i64_stride3_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr1, <8 x i64>* %in.vecptr2, <24 x i64>* %out.vec) nounwind { ; SSE-LABEL: store_i64_stride3_vf8: ; SSE: # %bb.0: -; SSE-NEXT: movaps (%rdi), %xmm3 -; SSE-NEXT: movaps 16(%rdi), %xmm2 -; SSE-NEXT: movaps 32(%rdi), %xmm13 -; SSE-NEXT: movaps 48(%rdi), %xmm12 -; SSE-NEXT: movaps (%rsi), %xmm8 -; SSE-NEXT: movaps 16(%rsi), %xmm9 -; SSE-NEXT: movaps 32(%rsi), %xmm11 -; SSE-NEXT: movaps 48(%rsi), %xmm4 -; SSE-NEXT: movaps (%rdx), %xmm7 -; SSE-NEXT: movaps 16(%rdx), %xmm0 -; SSE-NEXT: movaps 32(%rdx), %xmm6 -; SSE-NEXT: movaps 48(%rdx), %xmm5 -; SSE-NEXT: movaps %xmm4, %xmm10 -; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm5[1] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm12[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm4[0] -; SSE-NEXT: movaps %xmm11, %xmm14 -; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm6[1] -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm13[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm11[0] -; SSE-NEXT: movaps %xmm9, %xmm1 -; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm2 = 
xmm2[0],xmm9[0] -; SSE-NEXT: movaps %xmm8, %xmm4 -; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm7[1] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm3[2,3] -; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm8[0] -; SSE-NEXT: movaps %xmm3, (%rcx) -; SSE-NEXT: movaps %xmm7, 16(%rcx) -; SSE-NEXT: movaps %xmm4, 32(%rcx) -; SSE-NEXT: movaps %xmm2, 48(%rcx) -; SSE-NEXT: movaps %xmm0, 64(%rcx) -; SSE-NEXT: movaps %xmm1, 80(%rcx) -; SSE-NEXT: movaps %xmm13, 96(%rcx) -; SSE-NEXT: movaps %xmm6, 112(%rcx) -; SSE-NEXT: movaps %xmm14, 128(%rcx) -; SSE-NEXT: movaps %xmm12, 144(%rcx) -; SSE-NEXT: movaps %xmm5, 160(%rcx) -; SSE-NEXT: movaps %xmm10, 176(%rcx) +; SSE-NEXT: movapd (%rsi), %xmm0 +; SSE-NEXT: movapd 16(%rsi), %xmm8 +; SSE-NEXT: movapd 32(%rsi), %xmm2 +; SSE-NEXT: movapd 48(%rsi), %xmm3 +; SSE-NEXT: movapd (%rdx), %xmm4 +; SSE-NEXT: movapd 16(%rdx), %xmm5 +; SSE-NEXT: movapd 32(%rdx), %xmm6 +; SSE-NEXT: movapd 48(%rdx), %xmm7 +; SSE-NEXT: movapd %xmm8, %xmm1 +; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm4[0],xmm1[1] +; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1] +; SSE-NEXT: movapd %xmm2, 96(%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm7[0],xmm2[1] +; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm3[0],xmm7[1] +; SSE-NEXT: movapd %xmm3, 48(%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm6[0],xmm3[1] +; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm0[0],xmm6[1] +; SSE-NEXT: movapd %xmm0, (%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1] +; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm8[0],xmm5[1] +; SSE-NEXT: movapd %xmm8, 144(%rcx) +; SSE-NEXT: movapd %xmm1, 16(%rcx) +; SSE-NEXT: movapd %xmm4, 32(%rcx) +; SSE-NEXT: movapd %xmm0, 64(%rcx) +; SSE-NEXT: movapd %xmm5, 80(%rcx) +; SSE-NEXT: movapd %xmm3, 112(%rcx) +; SSE-NEXT: movapd %xmm6, 128(%rcx) +; SSE-NEXT: movapd %xmm2, 160(%rcx) +; SSE-NEXT: movapd %xmm7, 176(%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i64_stride3_vf8: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovapd 32(%rdi), %ymm0 -; AVX1-NEXT: vmovapd (%rdi), %ymm1 -; AVX1-NEXT: vmovapd 32(%rdx), %ymm2 -; AVX1-NEXT: vmovapd (%rdx), %ymm3 -; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm4 -; AVX1-NEXT: vmovaps (%rdi), %xmm5 -; AVX1-NEXT: vmovaps 32(%rdi), %xmm6 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm7 = xmm6[0],mem[0] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6 -; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5],ymm6[6,7] +; AVX1-NEXT: vmovaps (%rsi), %ymm0 +; AVX1-NEXT: vmovaps 32(%rsi), %ymm1 +; AVX1-NEXT: vmovaps (%rdx), %ymm2 +; AVX1-NEXT: vmovaps 32(%rdx), %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3,4,5],ymm3[6,7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm3[2,3],ymm1[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3,4,5],ymm2[6,7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm2[2,3],ymm0[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7] ; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm6 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm7 = xmm5[0],mem[0] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5 -; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7] -; AVX1-NEXT: vmovapd 16(%rdx), %xmm6 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm1[2,3],ymm3[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3] -; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm7 -; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3] -; AVX1-NEXT: vmovapd 48(%rdx), %xmm7 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm0[2,3],ymm2[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3] -; 
AVX1-NEXT: vbroadcastsd 56(%rsi), %ymm8 -; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2] -; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm8[0],ymm2[1],ymm8[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2] -; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3] -; AVX1-NEXT: vmovapd %ymm1, 32(%rcx) -; AVX1-NEXT: vmovapd %ymm0, 128(%rcx) -; AVX1-NEXT: vmovapd %ymm7, 160(%rcx) -; AVX1-NEXT: vmovapd %ymm6, 64(%rcx) -; AVX1-NEXT: vmovaps %ymm5, (%rcx) -; AVX1-NEXT: vmovaps %ymm4, 96(%rcx) +; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm0[2,3],ymm6[4,5],ymm0[6,7] +; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm1, %ymm7 +; AVX1-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm1[2,3],ymm7[4,5],ymm1[6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7] +; AVX1-NEXT: vmovaps %ymm1, 32(%rcx) +; AVX1-NEXT: vmovaps %ymm0, 128(%rcx) +; AVX1-NEXT: vmovaps %ymm5, 64(%rcx) +; AVX1-NEXT: vmovaps %ymm7, 96(%rcx) +; AVX1-NEXT: vmovaps %ymm4, 160(%rcx) +; AVX1-NEXT: vmovaps %ymm6, (%rcx) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: store_i64_stride3_vf8: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-NEXT: vmovaps 32(%rdi), %ymm1 -; AVX2-NEXT: vmovaps 32(%rdx), %ymm2 -; AVX2-NEXT: vmovaps (%rdx), %ymm3 -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm3[2,3] -; AVX2-NEXT: vmovaps 16(%rdx), %xmm5 -; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7] -; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm5 -; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7] -; AVX2-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm5 -; AVX2-NEXT: vmovddup {{.*#+}} xmm6 = mem[0,0] -; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm1[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm1[2,3],ymm2[2,3] -; AVX2-NEXT: vmovaps 48(%rdx), %xmm7 -; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7] -; AVX2-NEXT: vbroadcastsd 56(%rsi), %ymm7 -; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7] -; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm7 -; AVX2-NEXT: vmovddup {{.*#+}} xmm8 = mem[0,0] -; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7] -; AVX2-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,0,1,6,7,4,5] -; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3],ymm8[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7] -; AVX2-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,0,1,6,7,4,5] -; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7] -; AVX2-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-NEXT: vmovaps %ymm1, 128(%rcx) -; AVX2-NEXT: vmovaps %ymm7, (%rcx) -; AVX2-NEXT: vmovaps %ymm6, 160(%rcx) -; AVX2-NEXT: vmovaps %ymm5, 96(%rcx) -; AVX2-NEXT: vmovaps %ymm4, 64(%rcx) +; AVX2-NEXT: vmovaps (%rsi), %ymm0 +; AVX2-NEXT: vmovaps 32(%rsi), %ymm1 +; AVX2-NEXT: vmovaps (%rdx), %ymm2 +; AVX2-NEXT: vmovaps 32(%rdx), %ymm3 +; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = 
ymm3[2,1,2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm1[2,3,4,5],ymm4[6,7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[2,1,2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3,4,5],ymm5[6,7] +; AVX2-NEXT: vbroadcastsd (%rdx), %ymm6 +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vbroadcastsd 32(%rdx), %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm3[4,5],ymm1[6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7] +; AVX2-NEXT: vmovaps %ymm1, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm3, 96(%rcx) +; AVX2-NEXT: vmovaps %ymm0, 128(%rcx) +; AVX2-NEXT: vmovaps %ymm6, (%rcx) +; AVX2-NEXT: vmovaps %ymm5, 64(%rcx) +; AVX2-NEXT: vmovaps %ymm4, 160(%rcx) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: store_i64_stride3_vf8: ; AVX512: # %bb.0: -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm1 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,8,u,1,9,u,2,10> +; AVX512-NEXT: vmovdqu64 (%rsi), %zmm0 +; AVX512-NEXT: vmovdqu64 (%rdx), %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,8,3,4,9,6,7] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [10,1,2,11,4,5,12,7] ; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,8,3,4,9,6,7] -; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <2,11,u,3,12,u,4,13> -; AVX512-NEXT: vpermi2q %zmm0, %zmm2, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,11,3,4,12,6,7] -; AVX512-NEXT: vpermi2q %zmm1, %zmm3, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <5,13,u,6,14,u,7,15> -; AVX512-NEXT: vpermi2q %zmm2, %zmm1, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,14,3,4,15,6,7] -; AVX512-NEXT: vpermi2q %zmm0, %zmm3, %zmm1 -; AVX512-NEXT: vmovdqu64 %zmm1, 128(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm4, (%rcx) +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,13,2,3,14,5,6,15] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rcx) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <8 x i64>, <8 x i64>* %in.vecptr0, align 32 @@ -342,301 +274,189 @@ define void @store_i64_stride3_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vecptr1, <16 x i64>* %in.vecptr2, <48 x i64>* %out.vec) nounwind { ; SSE-LABEL: store_i64_stride3_vf16: ; SSE: # %bb.0: -; SSE-NEXT: subq $24, %rsp -; SSE-NEXT: movapd 64(%rdi), %xmm9 -; SSE-NEXT: movapd (%rdi), %xmm3 -; SSE-NEXT: movapd 16(%rdi), %xmm13 -; SSE-NEXT: movapd 32(%rdi), %xmm8 -; SSE-NEXT: movapd 48(%rdi), %xmm10 -; SSE-NEXT: movapd 64(%rsi), %xmm12 -; SSE-NEXT: movapd (%rsi), %xmm7 -; SSE-NEXT: movapd 16(%rsi), %xmm14 +; SSE-NEXT: movapd 80(%rsi), %xmm10 +; SSE-NEXT: movapd (%rsi), %xmm9 +; SSE-NEXT: movapd 16(%rsi), %xmm3 ; SSE-NEXT: movapd 32(%rsi), %xmm15 -; SSE-NEXT: movapd 48(%rsi), %xmm11 -; SSE-NEXT: movapd 64(%rdx), %xmm6 -; SSE-NEXT: movapd (%rdx), %xmm2 -; SSE-NEXT: movapd 16(%rdx), %xmm4 -; SSE-NEXT: movapd 32(%rdx), %xmm5 -; SSE-NEXT: movapd 48(%rdx), %xmm0 -; SSE-NEXT: movapd %xmm3, %xmm1 -; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm7[0] -; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill -; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1] -; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: 
unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm2[1] -; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movapd %xmm13, %xmm3 -; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm14[0] -; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm4[0],xmm13[1] -; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm4[1] -; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movapd %xmm8, %xmm13 -; SSE-NEXT: unpcklpd {{.*#+}} xmm13 = xmm13[0],xmm15[0] -; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1] -; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm5[1] -; SSE-NEXT: movapd %xmm10, %xmm1 -; SSE-NEXT: unpcklpd {{.*#+}} xmm10 = xmm10[0],xmm11[0] -; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] -; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm0[1] -; SSE-NEXT: movapd %xmm9, %xmm14 -; SSE-NEXT: unpcklpd {{.*#+}} xmm14 = xmm14[0],xmm12[0] -; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm6[0],xmm9[1] -; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm6[1] -; SSE-NEXT: movapd 80(%rdi), %xmm8 -; SSE-NEXT: movapd 80(%rsi), %xmm6 -; SSE-NEXT: movapd %xmm8, %xmm9 -; SSE-NEXT: unpcklpd {{.*#+}} xmm9 = xmm9[0],xmm6[0] -; SSE-NEXT: movapd 80(%rdx), %xmm0 -; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm0[0],xmm8[1] -; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm0[1] -; SSE-NEXT: movapd 96(%rdi), %xmm5 -; SSE-NEXT: movapd 96(%rsi), %xmm1 -; SSE-NEXT: movapd %xmm5, %xmm7 -; SSE-NEXT: unpcklpd {{.*#+}} xmm7 = xmm7[0],xmm1[0] -; SSE-NEXT: movapd 96(%rdx), %xmm2 -; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm2[0],xmm5[1] -; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] -; SSE-NEXT: movapd 112(%rdi), %xmm2 -; SSE-NEXT: movapd 112(%rsi), %xmm0 -; SSE-NEXT: movapd %xmm2, %xmm3 -; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm0[0] -; SSE-NEXT: movapd 112(%rdx), %xmm4 -; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1] -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1] -; SSE-NEXT: movapd %xmm0, 368(%rcx) -; SSE-NEXT: movapd %xmm2, 352(%rcx) -; SSE-NEXT: movapd %xmm3, 336(%rcx) -; SSE-NEXT: movapd %xmm1, 320(%rcx) -; SSE-NEXT: movapd %xmm5, 304(%rcx) -; SSE-NEXT: movapd %xmm7, 288(%rcx) -; SSE-NEXT: movapd %xmm6, 272(%rcx) -; SSE-NEXT: movapd %xmm8, 256(%rcx) -; SSE-NEXT: movapd %xmm9, 240(%rcx) -; SSE-NEXT: movapd %xmm12, 224(%rcx) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 208(%rcx) -; SSE-NEXT: movapd %xmm14, 192(%rcx) -; SSE-NEXT: movapd %xmm11, 176(%rcx) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 160(%rcx) -; SSE-NEXT: movapd %xmm10, 144(%rcx) -; SSE-NEXT: movapd %xmm15, 128(%rcx) +; SSE-NEXT: movapd 48(%rsi), %xmm14 +; SSE-NEXT: movapd 64(%rsi), %xmm13 +; SSE-NEXT: movapd 96(%rsi), %xmm12 +; SSE-NEXT: movapd 112(%rsi), %xmm11 +; SSE-NEXT: movapd (%rdx), %xmm8 +; SSE-NEXT: movapd 16(%rdx), %xmm7 +; SSE-NEXT: movapd 32(%rdx), %xmm6 +; SSE-NEXT: movapd 48(%rdx), %xmm5 +; SSE-NEXT: movapd 64(%rdx), %xmm4 +; SSE-NEXT: movaps 80(%rdx), %xmm1 +; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 96(%rdx), %xmm1 +; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm3, %xmm2 +; SSE-NEXT: movsd {{.*#+}} xmm2 = 
xmm8[0],xmm2[1] +; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm15[0],xmm8[1] +; SSE-NEXT: movapd 112(%rdx), %xmm1 +; SSE-NEXT: movapd %xmm15, 288(%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm5[0],xmm15[1] +; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm14[0],xmm5[1] +; SSE-NEXT: movapd %xmm14, 48(%rcx) +; SSE-NEXT: movapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1] +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm13[0],xmm0[1] +; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm13, 192(%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm7[0],xmm13[1] +; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm10[0],xmm7[1] +; SSE-NEXT: movapd %xmm10, 336(%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm4[0],xmm10[1] +; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm12[0],xmm4[1] +; SSE-NEXT: movapd %xmm12, 96(%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm1[0],xmm12[1] +; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm11[0],xmm1[1] +; SSE-NEXT: movapd %xmm11, 240(%rcx) +; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm6[0],xmm11[1] +; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm9[0],xmm6[1] +; SSE-NEXT: movapd %xmm9, (%rcx) +; SSE-NEXT: movapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1] +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1] +; SSE-NEXT: movapd %xmm3, 144(%rcx) +; SSE-NEXT: movapd %xmm2, 16(%rcx) +; SSE-NEXT: movapd %xmm8, 32(%rcx) +; SSE-NEXT: movapd %xmm13, 64(%rcx) +; SSE-NEXT: movapd %xmm7, 80(%rcx) +; SSE-NEXT: movapd %xmm11, 112(%rcx) +; SSE-NEXT: movapd %xmm6, 128(%rcx) +; SSE-NEXT: movapd %xmm15, 160(%rcx) +; SSE-NEXT: movapd %xmm5, 176(%rcx) +; SSE-NEXT: movapd %xmm10, 208(%rcx) +; SSE-NEXT: movapd %xmm4, 224(%rcx) +; SSE-NEXT: movapd %xmm9, 256(%rcx) +; SSE-NEXT: movapd %xmm0, 272(%rcx) +; SSE-NEXT: movapd %xmm14, 304(%rcx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 112(%rcx) -; SSE-NEXT: movapd %xmm13, 96(%rcx) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 80(%rcx) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 64(%rcx) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 48(%rcx) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 32(%rcx) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 16(%rcx) -; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, (%rcx) -; SSE-NEXT: addq $24, %rsp +; SSE-NEXT: movaps %xmm0, 320(%rcx) +; SSE-NEXT: movapd %xmm12, 352(%rcx) +; SSE-NEXT: movapd %xmm1, 368(%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i64_stride3_vf16: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovapd (%rdi), %ymm13 -; AVX1-NEXT: vmovapd 96(%rdi), %ymm14 -; AVX1-NEXT: vmovapd 32(%rdi), %ymm4 -; AVX1-NEXT: vmovapd 64(%rdi), %ymm7 -; AVX1-NEXT: vmovapd (%rdx), %ymm3 -; AVX1-NEXT: vmovapd 96(%rdx), %ymm5 -; AVX1-NEXT: vmovapd 32(%rdx), %ymm8 -; AVX1-NEXT: vmovapd 64(%rdx), %ymm10 -; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm1 -; AVX1-NEXT: vmovaps (%rdi), %xmm6 -; AVX1-NEXT: vmovaps 32(%rdi), %xmm0 -; AVX1-NEXT: vmovaps 64(%rdi), %xmm2 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm6[0],mem[0] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm9, %ymm6 -; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5],ymm6[6,7] -; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vinsertf128 $1, 64(%rdx), %ymm0, %ymm6 
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm2[0],mem[0] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm9, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm6[4,5],ymm2[6,7] -; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm2 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm0[0],mem[0] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm9, %ymm0 -; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7] -; AVX1-NEXT: vinsertf128 $1, 96(%rdx), %ymm0, %ymm0 -; AVX1-NEXT: vmovaps 96(%rdi), %xmm2 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm11 = xmm2[0],mem[0] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm11, %ymm2 -; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7] -; AVX1-NEXT: vmovapd 80(%rdx), %xmm0 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm7[2,3],ymm10[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3] -; AVX1-NEXT: vbroadcastsd 88(%rsi), %ymm2 -; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm2[2],ymm0[3] -; AVX1-NEXT: vmovapd 48(%rdx), %xmm0 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm4[2,3],ymm8[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3] -; AVX1-NEXT: vbroadcastsd 56(%rsi), %ymm2 -; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3] -; AVX1-NEXT: vmovapd 112(%rdx), %xmm2 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm14[2,3],ymm5[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm15[1,2,3] -; AVX1-NEXT: vbroadcastsd 120(%rsi), %ymm15 -; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm15[2],ymm2[3] -; AVX1-NEXT: vmovapd 16(%rdx), %xmm15 -; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm13[2,3],ymm3[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3] -; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm15 -; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm15 = mem[1,0,2,2] -; AVX1-NEXT: vblendpd {{.*#+}} ymm10 = ymm15[0],ymm10[1],ymm15[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm7[2],ymm10[3] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = mem[1,0,2,2] -; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm10[0],ymm8[1],ymm10[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2] -; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm8[0],ymm5[1],ymm8[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm14[2],ymm5[3] -; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2] -; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm8[0],ymm3[1],ymm8[2,3] -; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm13[2],ymm3[3] -; AVX1-NEXT: vmovapd %ymm3, 32(%rcx) -; AVX1-NEXT: vmovapd %ymm5, 320(%rcx) -; AVX1-NEXT: vmovapd %ymm4, 128(%rcx) -; AVX1-NEXT: vmovapd %ymm7, 224(%rcx) -; AVX1-NEXT: vmovapd %ymm1, 64(%rcx) -; AVX1-NEXT: vmovapd %ymm2, 352(%rcx) -; AVX1-NEXT: vmovapd %ymm0, 160(%rcx) -; AVX1-NEXT: vmovapd %ymm12, 256(%rcx) -; AVX1-NEXT: vmovaps %ymm11, 288(%rcx) -; AVX1-NEXT: vmovaps %ymm9, 96(%rcx) -; AVX1-NEXT: vmovaps %ymm6, 192(%rcx) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, (%rcx) +; AVX1-NEXT: vmovaps (%rsi), %ymm0 +; AVX1-NEXT: vmovaps 32(%rsi), %ymm1 +; AVX1-NEXT: vmovaps 64(%rsi), %ymm2 +; AVX1-NEXT: vmovaps 96(%rsi), %ymm3 +; AVX1-NEXT: vmovaps (%rdx), %ymm4 +; AVX1-NEXT: vmovaps 32(%rdx), %ymm5 +; AVX1-NEXT: vmovaps 64(%rdx), %ymm6 +; AVX1-NEXT: vmovaps 96(%rdx), %ymm7 +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3,4,5],ymm6[6,7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm6[2,3],ymm0[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7] +; 
AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0,1,2,3,4,5],ymm7[6,7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm7[2,3],ymm3[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3,4,5],ymm5[6,7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm5[2,3],ymm1[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3,4,5],ymm4[6,7] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm4[2,3],ymm2[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3],ymm12[4,5],ymm11[6,7] +; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm12 +; AVX1-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm0[2,3],ymm12[4,5],ymm0[6,7] +; AVX1-NEXT: vinsertf128 $1, 64(%rdx), %ymm2, %ymm13 +; AVX1-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm2[2,3],ymm13[4,5],ymm2[6,7] +; AVX1-NEXT: vinsertf128 $1, 96(%rdx), %ymm1, %ymm14 +; AVX1-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm1[2,3],ymm14[4,5],ymm1[6,7] +; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm3, %ymm15 +; AVX1-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1],ymm3[2,3],ymm15[4,5],ymm3[6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2,3],ymm3[4,5,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2,3],ymm2[4,5,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3],ymm1[4,5,6,7] +; AVX1-NEXT: vmovaps %ymm1, 32(%rcx) +; AVX1-NEXT: vmovaps %ymm0, 128(%rcx) +; AVX1-NEXT: vmovaps %ymm2, 320(%rcx) +; AVX1-NEXT: vmovaps %ymm3, 224(%rcx) +; AVX1-NEXT: vmovaps %ymm11, 64(%rcx) +; AVX1-NEXT: vmovaps %ymm15, 96(%rcx) +; AVX1-NEXT: vmovaps %ymm10, 160(%rcx) +; AVX1-NEXT: vmovaps %ymm14, 288(%rcx) +; AVX1-NEXT: vmovaps %ymm9, 352(%rcx) +; AVX1-NEXT: vmovaps %ymm13, 192(%rcx) +; AVX1-NEXT: vmovaps %ymm8, 256(%rcx) +; AVX1-NEXT: vmovaps %ymm12, (%rcx) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: store_i64_stride3_vf16: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-NEXT: vmovaps 32(%rdi), %ymm4 -; AVX2-NEXT: vmovaps 64(%rdi), %ymm7 -; AVX2-NEXT: vmovaps 96(%rdi), %ymm3 -; AVX2-NEXT: vmovaps (%rdx), %ymm2 -; AVX2-NEXT: vmovaps 96(%rdx), %ymm6 -; AVX2-NEXT: vmovaps 32(%rdx), %ymm8 -; AVX2-NEXT: vmovaps 64(%rdx), %ymm10 -; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm1 -; AVX2-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] -; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0,1],ymm5[2,3],ymm9[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5],ymm5[6,7] -; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vinsertf128 $1, 64(%rdx), %ymm0, %ymm5 -; AVX2-NEXT: vmovddup {{.*#+}} xmm9 = mem[0,0] -; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm7[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3],ymm11[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5],ymm9[6,7] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm7[2,3],ymm10[2,3] -; AVX2-NEXT: vmovaps 80(%rdx), %xmm11 -; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3],ymm11[4,5],ymm9[6,7] -; AVX2-NEXT: vbroadcastsd 88(%rsi), %ymm11 -; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5],ymm9[6,7] -; AVX2-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm11 -; AVX2-NEXT: vmovddup {{.*#+}} xmm12 = mem[0,0] -; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm4[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7] -; AVX2-NEXT: 
vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5],ymm12[6,7] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm4[2,3],ymm8[2,3] -; AVX2-NEXT: vmovaps 48(%rdx), %xmm13 -; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5],ymm12[6,7] -; AVX2-NEXT: vbroadcastsd 56(%rsi), %ymm13 -; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7] -; AVX2-NEXT: vinsertf128 $1, 96(%rdx), %ymm0, %ymm13 -; AVX2-NEXT: vmovddup {{.*#+}} xmm14 = mem[0,0] -; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = ymm3[0,1,2,1] -; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5],ymm14[6,7] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm3[2,3],ymm6[2,3] -; AVX2-NEXT: vmovaps 112(%rdx), %xmm15 -; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5],ymm14[6,7] -; AVX2-NEXT: vbroadcastsd 120(%rsi), %ymm15 -; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm0[2,3],ymm2[2,3] -; AVX2-NEXT: vmovaps 16(%rdx), %xmm1 -; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2,3],ymm1[4,5],ymm15[6,7] -; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm15 -; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5],ymm1[6,7] -; AVX2-NEXT: vpermilps {{.*#+}} ymm15 = mem[2,3,0,1,6,7,4,5] -; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3],ymm15[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5],ymm10[6,7] -; AVX2-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,0,1,6,7,4,5] -; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3],ymm10[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5],ymm8[6,7] -; AVX2-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,0,1,6,7,4,5] -; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5],ymm6[6,7] -; AVX2-NEXT: vpermilps {{.*#+}} ymm6 = mem[2,3,0,1,6,7,4,5] -; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5,6,7] -; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7] -; AVX2-NEXT: vmovaps %ymm0, 32(%rcx) -; AVX2-NEXT: vmovaps %ymm3, 320(%rcx) -; AVX2-NEXT: vmovaps %ymm4, 128(%rcx) -; AVX2-NEXT: vmovaps %ymm7, 224(%rcx) -; AVX2-NEXT: vmovaps %ymm1, 64(%rcx) -; AVX2-NEXT: vmovaps %ymm14, 352(%rcx) -; AVX2-NEXT: vmovaps %ymm13, 288(%rcx) -; AVX2-NEXT: vmovaps %ymm12, 160(%rcx) -; AVX2-NEXT: vmovaps %ymm11, 96(%rcx) -; AVX2-NEXT: vmovaps %ymm9, 256(%rcx) -; AVX2-NEXT: vmovaps %ymm5, 192(%rcx) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, (%rcx) +; AVX2-NEXT: vmovaps (%rsi), %ymm0 +; AVX2-NEXT: vmovaps 32(%rsi), %ymm1 +; AVX2-NEXT: vmovaps 64(%rsi), %ymm2 +; AVX2-NEXT: vmovaps 96(%rsi), %ymm3 +; AVX2-NEXT: vmovaps (%rdx), %ymm4 +; AVX2-NEXT: vmovaps 32(%rdx), %ymm5 +; AVX2-NEXT: vmovaps 64(%rdx), %ymm6 +; AVX2-NEXT: vmovaps 96(%rdx), %ymm7 +; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm6[2,1,2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm0[2,3,4,5],ymm8[6,7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm7[2,1,2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm3[2,3,4,5],ymm9[6,7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm5[2,1,2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm1[2,3,4,5],ymm10[6,7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm4[2,1,2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm2[2,3,4,5],ymm11[6,7] +; AVX2-NEXT: vbroadcastsd (%rdx), %ymm12 +; AVX2-NEXT: vblendps {{.*#+}} ymm12 = 
ymm0[0,1,2,3],ymm12[4,5],ymm0[6,7] +; AVX2-NEXT: vbroadcastsd 64(%rdx), %ymm13 +; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm13[4,5],ymm2[6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm3[0,1],ymm6[2,3],ymm3[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2,3],ymm2[4,5,6,7] +; AVX2-NEXT: vbroadcastsd 96(%rdx), %ymm7 +; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm7[4,5],ymm1[6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vbroadcastsd 32(%rdx), %ymm5 +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5],ymm3[6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3],ymm1[4,5,6,7] +; AVX2-NEXT: vmovaps %ymm1, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm3, 96(%rcx) +; AVX2-NEXT: vmovaps %ymm0, 128(%rcx) +; AVX2-NEXT: vmovaps %ymm7, 288(%rcx) +; AVX2-NEXT: vmovaps %ymm2, 320(%rcx) +; AVX2-NEXT: vmovaps %ymm6, 224(%rcx) +; AVX2-NEXT: vmovaps %ymm13, 192(%rcx) +; AVX2-NEXT: vmovaps %ymm12, (%rcx) +; AVX2-NEXT: vmovaps %ymm11, 64(%rcx) +; AVX2-NEXT: vmovaps %ymm10, 160(%rcx) +; AVX2-NEXT: vmovaps %ymm9, 352(%rcx) +; AVX2-NEXT: vmovaps %ymm8, 256(%rcx) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: store_i64_stride3_vf16: ; AVX512: # %bb.0: -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 -; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm1 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm2 -; AVX512-NEXT: vmovdqu64 64(%rsi), %zmm3 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm4 -; AVX512-NEXT: vmovdqu64 64(%rdx), %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,8,u,1,9,u,2,10> -; AVX512-NEXT: vmovdqa64 %zmm0, %zmm7 -; AVX512-NEXT: vpermt2q %zmm2, %zmm6, %zmm7 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,8,3,4,9,6,7] -; AVX512-NEXT: vpermt2q %zmm4, %zmm8, %zmm7 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = <5,13,u,6,14,u,7,15> -; AVX512-NEXT: vmovdqa64 %zmm3, %zmm10 -; AVX512-NEXT: vpermt2q %zmm5, %zmm9, %zmm10 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,1,14,3,4,15,6,7] -; AVX512-NEXT: vpermt2q %zmm1, %zmm11, %zmm10 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <2,11,u,3,12,u,4,13> -; AVX512-NEXT: vmovdqa64 %zmm5, %zmm13 -; AVX512-NEXT: vpermt2q %zmm1, %zmm12, %zmm13 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,11,3,4,12,6,7] -; AVX512-NEXT: vpermt2q %zmm3, %zmm14, %zmm13 -; AVX512-NEXT: vpermt2q %zmm3, %zmm6, %zmm1 -; AVX512-NEXT: vpermt2q %zmm5, %zmm8, %zmm1 -; AVX512-NEXT: vpermi2q %zmm4, %zmm2, %zmm9 -; AVX512-NEXT: vpermt2q %zmm0, %zmm11, %zmm9 -; AVX512-NEXT: vpermt2q %zmm0, %zmm12, %zmm4 -; AVX512-NEXT: vpermt2q %zmm2, %zmm14, %zmm4 -; AVX512-NEXT: vmovdqu64 %zmm4, 64(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm9, 128(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm1, 192(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm13, 256(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm10, 320(%rcx) -; AVX512-NEXT: vmovdqu64 %zmm7, (%rcx) +; AVX512-NEXT: vmovdqu64 (%rsi), %zmm0 +; AVX512-NEXT: vmovdqu64 64(%rsi), %zmm1 +; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2 +; AVX512-NEXT: vmovdqu64 64(%rdx), %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,8,3,4,9,6,7] +; AVX512-NEXT: vmovdqa64 %zmm0, %zmm5 +; AVX512-NEXT: vpermt2q %zmm2, %zmm4, %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,13,2,3,14,5,6,15] +; AVX512-NEXT: vmovdqa64 %zmm1, %zmm7 +; AVX512-NEXT: vpermt2q %zmm3, %zmm6, %zmm7 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [10,1,2,11,4,5,12,7] +; AVX512-NEXT: vmovdqa64 %zmm0, %zmm9 +; AVX512-NEXT: vpermt2q %zmm3, %zmm8, %zmm9 +; AVX512-NEXT: vpermi2q %zmm3, %zmm1, %zmm4 +; AVX512-NEXT: vpermt2q %zmm2, %zmm6, %zmm0 +; AVX512-NEXT: vpermt2q %zmm2, %zmm8, %zmm1 +; AVX512-NEXT: vmovdqu64 
%zmm1, 64(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm0, 128(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm4, 192(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm9, 256(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm7, 320(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm5, (%rcx) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <16 x i64>, <16 x i64>* %in.vecptr0, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll @@ -258,107 +258,107 @@ ; ; AVX1-LABEL: store_i64_stride4_vf8: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps 32(%rdx), %ymm2 -; AVX1-NEXT: vmovaps (%rdx), %ymm3 +; AVX1-NEXT: vmovaps (%rdx), %ymm2 +; AVX1-NEXT: vmovaps 32(%rdx), %ymm1 +; AVX1-NEXT: vmovaps (%rcx), %ymm3 ; AVX1-NEXT: vmovaps 32(%rcx), %ymm4 -; AVX1-NEXT: vmovaps (%rcx), %ymm5 -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm5[0],ymm3[2],ymm5[2] -; AVX1-NEXT: vmovaps 16(%rsi), %xmm6 -; AVX1-NEXT: vmovaps 16(%rdi), %xmm7 -; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm7[0],xmm6[0] -; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm9 = ymm2[1],ymm4[1],ymm2[3],ymm4[3] -; AVX1-NEXT: vmovaps 48(%rsi), %xmm0 -; AVX1-NEXT: vmovaps 48(%rdi), %xmm1 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm1[1],xmm0[1] -; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7] -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm2[4,5,6,7] -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm3[1],ymm5[1],ymm3[3],ymm5[3] -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm7[1],xmm6[1] -; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm1[4,5,6,7] -; AVX1-NEXT: vmovaps 32(%rsi), %xmm2 -; AVX1-NEXT: vmovaps 32(%rdi), %xmm3 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm3[1],xmm2[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0] -; AVX1-NEXT: vmovaps (%rsi), %xmm3 -; AVX1-NEXT: vmovaps (%rdi), %xmm5 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm5[1],xmm3[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm5[0],xmm3[0] -; AVX1-NEXT: vmovaps (%rcx), %xmm5 +; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm4[0],ymm1[2],ymm4[2] +; AVX1-NEXT: vmovaps 48(%rsi), %xmm5 +; AVX1-NEXT: vmovaps 48(%rdi), %xmm6 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0] +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm6[1],xmm5[1] +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm4[0,1,2,3],ymm1[4,5,6,7] +; AVX1-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2] +; AVX1-NEXT: vmovaps 16(%rsi), %xmm5 +; AVX1-NEXT: vmovaps 16(%rdi), %xmm6 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0] +; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm7[0,1,2,3],ymm4[4,5,6,7] +; AVX1-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm5[1] +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3],ymm2[4,5,6,7] +; AVX1-NEXT: vmovaps 32(%rsi), %xmm3 +; AVX1-NEXT: vmovaps 32(%rdi), %xmm5 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm12 = xmm5[0],xmm3[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm5[1],xmm3[1] +; AVX1-NEXT: vmovaps (%rsi), %xmm5 +; AVX1-NEXT: vmovaps (%rdi), %xmm7 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm7[0],xmm5[0] +; AVX1-NEXT: 
vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm5[1] ; AVX1-NEXT: vmovaps 32(%rcx), %xmm7 -; AVX1-NEXT: vmovaps (%rdx), %xmm0 ; AVX1-NEXT: vmovaps 32(%rdx), %xmm1 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm1[1],xmm7[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm7[0] -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm0[1],xmm5[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm5[0] -; AVX1-NEXT: vmovaps %xmm0, 16(%r8) -; AVX1-NEXT: vmovaps %xmm7, 48(%r8) -; AVX1-NEXT: vmovaps %xmm1, 144(%r8) -; AVX1-NEXT: vmovaps %xmm4, 176(%r8) -; AVX1-NEXT: vmovaps %xmm3, (%r8) -; AVX1-NEXT: vmovaps %xmm6, 32(%r8) -; AVX1-NEXT: vmovaps %xmm2, 128(%r8) -; AVX1-NEXT: vmovaps %xmm12, 160(%r8) +; AVX1-NEXT: vmovlhps {{.*#+}} xmm4 = xmm1[0],xmm7[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm7[1] +; AVX1-NEXT: vmovaps (%rcx), %xmm7 +; AVX1-NEXT: vmovaps (%rdx), %xmm2 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm2[0],xmm7[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm7[1] +; AVX1-NEXT: vmovaps %xmm2, 48(%r8) +; AVX1-NEXT: vmovaps %xmm6, 16(%r8) +; AVX1-NEXT: vmovaps %xmm1, 176(%r8) +; AVX1-NEXT: vmovaps %xmm4, 144(%r8) +; AVX1-NEXT: vmovaps %xmm5, 32(%r8) +; AVX1-NEXT: vmovaps %xmm0, (%r8) +; AVX1-NEXT: vmovaps %xmm3, 160(%r8) +; AVX1-NEXT: vmovaps %xmm12, 128(%r8) ; AVX1-NEXT: vmovaps %ymm11, 96(%r8) -; AVX1-NEXT: vmovaps %ymm10, 192(%r8) +; AVX1-NEXT: vmovaps %ymm10, 64(%r8) ; AVX1-NEXT: vmovaps %ymm9, 224(%r8) -; AVX1-NEXT: vmovaps %ymm8, 64(%r8) +; AVX1-NEXT: vmovaps %ymm8, 192(%r8) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: store_i64_stride4_vf8: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovaps 32(%rdi), %ymm2 -; AVX2-NEXT: vmovaps (%rdi), %ymm3 +; AVX2-NEXT: vmovaps (%rdi), %ymm2 +; AVX2-NEXT: vmovaps 32(%rdi), %ymm1 +; AVX2-NEXT: vmovaps (%rsi), %ymm3 ; AVX2-NEXT: vmovaps 32(%rsi), %ymm4 -; AVX2-NEXT: vmovaps (%rsi), %ymm5 +; AVX2-NEXT: vmovaps (%rdx), %ymm5 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm6 -; AVX2-NEXT: vmovaps (%rdx), %ymm7 +; AVX2-NEXT: vmovaps (%rcx), %ymm7 ; AVX2-NEXT: vmovaps 32(%rcx), %ymm8 -; AVX2-NEXT: vmovaps (%rcx), %ymm9 -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm9[0],ymm7[2],ymm9[2] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm3[0],ymm5[0],ymm3[2],ymm5[2] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm1[2,3],ymm0[2,3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm6[1],ymm8[1],ymm6[3],ymm8[3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm2[1],ymm4[1],ymm2[3],ymm4[3] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm10[2,3],ymm1[2,3] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm8[0],ymm6[2],ymm8[2] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm2[2,3],ymm6[2,3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm9[1],ymm7[3],ymm9[3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm3[2,3],ymm4[2,3] -; AVX2-NEXT: vmovaps (%rsi), %xmm4 +; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm6[0],ymm8[0],ymm6[2],ymm8[2] +; AVX2-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm1[0],ymm4[0],ymm1[2],ymm4[2] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3],ymm0[2,3] +; AVX2-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm6[1],ymm8[1],ymm6[3],ymm8[3] +; AVX2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm1[2,3],ymm6[2,3] +; AVX2-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm7[0],ymm5[2],ymm7[2] +; AVX2-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm6[2,3],ymm4[2,3] +; AVX2-NEXT: 
vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm7[1],ymm5[3],ymm7[3] +; AVX2-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm2[2,3],ymm5[2,3] +; AVX2-NEXT: vmovaps (%rsi), %xmm3 ; AVX2-NEXT: vmovaps 32(%rsi), %xmm5 ; AVX2-NEXT: vmovaps (%rdi), %xmm6 ; AVX2-NEXT: vmovaps 32(%rdi), %xmm7 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm5[1] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm5[0] ; AVX2-NEXT: vmovaps (%rcx), %xmm1 -; AVX2-NEXT: vmovaps 32(%rcx), %xmm2 -; AVX2-NEXT: vmovaps (%rdx), %xmm3 +; AVX2-NEXT: vmovaps 32(%rcx), %xmm4 +; AVX2-NEXT: vmovaps (%rdx), %xmm2 ; AVX2-NEXT: vmovaps 32(%rdx), %xmm0 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm13 = xmm0[1],xmm2[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm5[0] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm6[1],xmm4[1] -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm3[1],xmm1[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm4[0] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0] -; AVX2-NEXT: vmovaps %xmm1, 16(%r8) +; AVX2-NEXT: vmovlhps {{.*#+}} xmm13 = xmm0[0],xmm4[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm5[1] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm3[0] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm7 = xmm2[0],xmm1[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm3[1] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1] +; AVX2-NEXT: vmovaps %xmm1, 48(%r8) +; AVX2-NEXT: vmovaps %xmm3, 32(%r8) +; AVX2-NEXT: vmovaps %xmm7, 16(%r8) ; AVX2-NEXT: vmovaps %xmm4, (%r8) -; AVX2-NEXT: vmovaps %xmm7, 48(%r8) -; AVX2-NEXT: vmovaps %xmm2, 32(%r8) -; AVX2-NEXT: vmovaps %xmm0, 144(%r8) -; AVX2-NEXT: vmovaps %xmm5, 128(%r8) -; AVX2-NEXT: vmovaps %xmm13, 176(%r8) -; AVX2-NEXT: vmovaps %xmm12, 160(%r8) -; AVX2-NEXT: vmovaps %ymm9, 96(%r8) -; AVX2-NEXT: vmovaps %ymm8, 192(%r8) -; AVX2-NEXT: vmovaps %ymm10, 224(%r8) -; AVX2-NEXT: vmovaps %ymm11, 64(%r8) +; AVX2-NEXT: vmovaps %xmm0, 176(%r8) +; AVX2-NEXT: vmovaps %xmm5, 160(%r8) +; AVX2-NEXT: vmovaps %xmm13, 144(%r8) +; AVX2-NEXT: vmovaps %xmm12, 128(%r8) +; AVX2-NEXT: vmovaps %ymm11, 96(%r8) +; AVX2-NEXT: vmovaps %ymm10, 64(%r8) +; AVX2-NEXT: vmovaps %ymm8, 224(%r8) +; AVX2-NEXT: vmovaps %ymm9, 192(%r8) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -617,54 +617,54 @@ ; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX1-NEXT: vmovaps 64(%rsi), %xmm2 ; AVX1-NEXT: vmovaps 64(%rdi), %xmm3 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm3[1],xmm2[1] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm3[0],xmm2[0] ; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-NEXT: vmovaps 32(%rcx), %xmm4 ; AVX1-NEXT: vmovaps 64(%rcx), %xmm5 ; AVX1-NEXT: vmovaps 64(%rdx), %xmm7 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm15 = xmm7[1],xmm5[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm14 = xmm3[0],xmm2[0] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm13 = xmm7[0],xmm5[0] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm15 = xmm7[0],xmm5[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm14 = xmm3[1],xmm2[1] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm13 = xmm7[1],xmm5[1] ; AVX1-NEXT: vmovaps 32(%rsi), %xmm5 ; AVX1-NEXT: vmovaps 32(%rdi), %xmm7 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm5[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm10 = xmm7[0],xmm5[0] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm5[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm7[1],xmm5[1] ; AVX1-NEXT: vmovaps 32(%rdx), %xmm7 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm7[1],xmm4[1] -; 
AVX1-NEXT: vmovlhps {{.*#+}} xmm9 = xmm7[0],xmm4[0] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm11 = xmm7[0],xmm4[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm7[1],xmm4[1] ; AVX1-NEXT: vmovaps 96(%rsi), %xmm7 ; AVX1-NEXT: vmovaps 96(%rdi), %xmm0 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm0[1],xmm7[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm0[0],xmm7[0] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm7[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm0[1],xmm7[1] ; AVX1-NEXT: vmovaps 96(%rcx), %xmm7 ; AVX1-NEXT: vmovaps 96(%rdx), %xmm0 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm0[1],xmm7[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm7[0] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm0[0],xmm7[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm0[1],xmm7[1] ; AVX1-NEXT: vmovaps (%rsi), %xmm7 ; AVX1-NEXT: vmovaps (%rdi), %xmm0 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm0[1],xmm7[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm7[0] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm0[0],xmm7[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm7[1] ; AVX1-NEXT: vmovaps (%rcx), %xmm7 ; AVX1-NEXT: vmovaps (%rdx), %xmm0 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm0[1],xmm7[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm7[0] -; AVX1-NEXT: vmovaps %xmm0, 16(%r8) -; AVX1-NEXT: vmovaps %xmm2, (%r8) -; AVX1-NEXT: vmovaps %xmm1, 48(%r8) -; AVX1-NEXT: vmovaps %xmm3, 32(%r8) -; AVX1-NEXT: vmovaps %xmm4, 400(%r8) -; AVX1-NEXT: vmovaps %xmm6, 384(%r8) -; AVX1-NEXT: vmovaps %xmm5, 432(%r8) -; AVX1-NEXT: vmovaps %xmm8, 416(%r8) -; AVX1-NEXT: vmovaps %xmm9, 144(%r8) -; AVX1-NEXT: vmovaps %xmm10, 128(%r8) -; AVX1-NEXT: vmovaps %xmm11, 176(%r8) -; AVX1-NEXT: vmovaps %xmm12, 160(%r8) -; AVX1-NEXT: vmovaps %xmm13, 272(%r8) -; AVX1-NEXT: vmovaps %xmm14, 256(%r8) -; AVX1-NEXT: vmovaps %xmm15, 304(%r8) +; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm7[0] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1] +; AVX1-NEXT: vmovaps %xmm0, 48(%r8) +; AVX1-NEXT: vmovaps %xmm2, 32(%r8) +; AVX1-NEXT: vmovaps %xmm1, 16(%r8) +; AVX1-NEXT: vmovaps %xmm3, (%r8) +; AVX1-NEXT: vmovaps %xmm4, 432(%r8) +; AVX1-NEXT: vmovaps %xmm6, 416(%r8) +; AVX1-NEXT: vmovaps %xmm5, 400(%r8) +; AVX1-NEXT: vmovaps %xmm8, 384(%r8) +; AVX1-NEXT: vmovaps %xmm9, 176(%r8) +; AVX1-NEXT: vmovaps %xmm10, 160(%r8) +; AVX1-NEXT: vmovaps %xmm11, 144(%r8) +; AVX1-NEXT: vmovaps %xmm12, 128(%r8) +; AVX1-NEXT: vmovaps %xmm13, 304(%r8) +; AVX1-NEXT: vmovaps %xmm14, 288(%r8) +; AVX1-NEXT: vmovaps %xmm15, 272(%r8) ; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX1-NEXT: vmovaps %xmm0, 288(%r8) +; AVX1-NEXT: vmovaps %xmm0, 256(%r8) ; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX1-NEXT: vmovaps %ymm0, 448(%r8) ; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload @@ -740,52 +740,52 @@ ; AVX2-NEXT: vmovaps 64(%rsi), %xmm3 ; AVX2-NEXT: vmovaps 32(%rdi), %xmm4 ; AVX2-NEXT: vmovaps 64(%rdi), %xmm5 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm5[1],xmm3[1] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm3[0] ; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-NEXT: vmovaps 32(%rcx), %xmm6 ; AVX2-NEXT: vmovaps 64(%rcx), %xmm7 ; AVX2-NEXT: vmovaps 64(%rdx), %xmm0 -; AVX2-NEXT: vmovlhps {{.*#+}} xmm14 = xmm5[0],xmm3[0] -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm15 = xmm0[1],xmm7[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm13 = xmm0[0],xmm7[0] -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm4[1],xmm2[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm10 = xmm4[0],xmm2[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm14 = 
xmm5[1],xmm3[1] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm15 = xmm0[0],xmm7[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm13 = xmm0[1],xmm7[1] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm12 = xmm4[0],xmm2[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm4[1],xmm2[1] ; AVX2-NEXT: vmovaps 32(%rdx), %xmm4 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm4[1],xmm6[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm9 = xmm4[0],xmm6[0] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm11 = xmm4[0],xmm6[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm4[1],xmm6[1] ; AVX2-NEXT: vmovaps 96(%rsi), %xmm6 ; AVX2-NEXT: vmovaps 96(%rdi), %xmm0 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm0[1],xmm6[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm7 = xmm0[0],xmm6[0] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm6[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm0[1],xmm6[1] ; AVX2-NEXT: vmovaps 96(%rcx), %xmm6 ; AVX2-NEXT: vmovaps 96(%rdx), %xmm0 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm0[1],xmm6[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm6[0] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm5 = xmm0[0],xmm6[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm0[1],xmm6[1] ; AVX2-NEXT: vmovaps (%rsi), %xmm6 ; AVX2-NEXT: vmovaps (%rdi), %xmm0 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm0[1],xmm6[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm6[0] +; AVX2-NEXT: vmovlhps {{.*#+}} xmm3 = xmm0[0],xmm6[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm6[1] ; AVX2-NEXT: vmovaps (%rcx), %xmm6 ; AVX2-NEXT: vmovaps (%rdx), %xmm0 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm0[1],xmm6[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0] -; AVX2-NEXT: vmovaps %xmm0, 16(%r8) -; AVX2-NEXT: vmovaps %xmm2, (%r8) -; AVX2-NEXT: vmovaps %xmm1, 48(%r8) -; AVX2-NEXT: vmovaps %xmm3, 32(%r8) -; AVX2-NEXT: vmovaps %xmm4, 400(%r8) -; AVX2-NEXT: vmovaps %xmm7, 384(%r8) -; AVX2-NEXT: vmovaps %xmm5, 432(%r8) -; AVX2-NEXT: vmovaps %xmm8, 416(%r8) -; AVX2-NEXT: vmovaps %xmm9, 144(%r8) -; AVX2-NEXT: vmovaps %xmm10, 128(%r8) -; AVX2-NEXT: vmovaps %xmm11, 176(%r8) -; AVX2-NEXT: vmovaps %xmm12, 160(%r8) -; AVX2-NEXT: vmovaps %xmm13, 272(%r8) -; AVX2-NEXT: vmovaps %xmm14, 256(%r8) -; AVX2-NEXT: vmovaps %xmm15, 304(%r8) +; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm6[0] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1] +; AVX2-NEXT: vmovaps %xmm0, 48(%r8) +; AVX2-NEXT: vmovaps %xmm2, 32(%r8) +; AVX2-NEXT: vmovaps %xmm1, 16(%r8) +; AVX2-NEXT: vmovaps %xmm3, (%r8) +; AVX2-NEXT: vmovaps %xmm4, 432(%r8) +; AVX2-NEXT: vmovaps %xmm7, 416(%r8) +; AVX2-NEXT: vmovaps %xmm5, 400(%r8) +; AVX2-NEXT: vmovaps %xmm8, 384(%r8) +; AVX2-NEXT: vmovaps %xmm9, 176(%r8) +; AVX2-NEXT: vmovaps %xmm10, 160(%r8) +; AVX2-NEXT: vmovaps %xmm11, 144(%r8) +; AVX2-NEXT: vmovaps %xmm12, 128(%r8) +; AVX2-NEXT: vmovaps %xmm13, 304(%r8) +; AVX2-NEXT: vmovaps %xmm14, 288(%r8) +; AVX2-NEXT: vmovaps %xmm15, 272(%r8) ; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; AVX2-NEXT: vmovaps %xmm0, 288(%r8) +; AVX2-NEXT: vmovaps %xmm0, 256(%r8) ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-NEXT: vmovaps %ymm0, 448(%r8) ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll @@ -122,158 +122,95 @@ ; SSE-LABEL: store_i64_stride6_vf4: ; SSE: # %bb.0: ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: 
movaps (%rdi), %xmm0 -; SSE-NEXT: movaps 16(%rdi), %xmm15 -; SSE-NEXT: movaps (%rsi), %xmm8 -; SSE-NEXT: movaps 16(%rsi), %xmm10 -; SSE-NEXT: movaps (%rdx), %xmm6 +; SSE-NEXT: movaps (%rdx), %xmm8 ; SSE-NEXT: movaps 16(%rdx), %xmm1 -; SSE-NEXT: movaps (%rcx), %xmm9 -; SSE-NEXT: movaps 16(%rcx), %xmm5 -; SSE-NEXT: movaps (%r8), %xmm7 -; SSE-NEXT: movaps 16(%r8), %xmm4 -; SSE-NEXT: movaps (%r9), %xmm11 -; SSE-NEXT: movaps 16(%r9), %xmm3 -; SSE-NEXT: movaps %xmm4, %xmm12 -; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm3[1] -; SSE-NEXT: movaps %xmm1, %xmm13 -; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm5[1] -; SSE-NEXT: movaps %xmm15, %xmm14 -; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm10[1] -; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm3[0] -; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm5[0] -; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm10[0] -; SSE-NEXT: movaps %xmm7, %xmm3 -; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm11[1] -; SSE-NEXT: movaps %xmm6, %xmm5 -; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm9[1] -; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm8[1] -; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm11[0] -; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm9[0] -; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm8[0] -; SSE-NEXT: movaps %xmm0, (%rax) -; SSE-NEXT: movaps %xmm6, 16(%rax) -; SSE-NEXT: movaps %xmm7, 32(%rax) -; SSE-NEXT: movaps %xmm2, 48(%rax) -; SSE-NEXT: movaps %xmm5, 64(%rax) -; SSE-NEXT: movaps %xmm3, 80(%rax) -; SSE-NEXT: movaps %xmm15, 96(%rax) -; SSE-NEXT: movaps %xmm1, 112(%rax) -; SSE-NEXT: movaps %xmm4, 128(%rax) -; SSE-NEXT: movaps %xmm14, 144(%rax) -; SSE-NEXT: movaps %xmm13, 160(%rax) -; SSE-NEXT: movaps %xmm12, 176(%rax) +; SSE-NEXT: movaps (%rcx), %xmm2 +; SSE-NEXT: movaps 16(%rcx), %xmm3 +; SSE-NEXT: movapd (%r8), %xmm4 +; SSE-NEXT: movapd 16(%r8), %xmm5 +; SSE-NEXT: movapd (%r9), %xmm6 +; SSE-NEXT: movapd 16(%r9), %xmm7 +; SSE-NEXT: movapd %xmm4, %xmm0 +; SSE-NEXT: movapd %xmm4, 64(%rax) +; SSE-NEXT: movapd %xmm4, 160(%rax) +; SSE-NEXT: unpcklpd {{.*#+}} xmm4 = xmm4[0],xmm7[0] +; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm5[0],xmm7[1] +; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm6[0] +; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm5[0],xmm6[1] +; SSE-NEXT: movaps %xmm8, (%rax) +; SSE-NEXT: movaps %xmm1, 16(%rax) +; SSE-NEXT: movapd %xmm5, 48(%rax) +; SSE-NEXT: movaps %xmm2, 96(%rax) +; SSE-NEXT: movaps %xmm3, 112(%rax) +; SSE-NEXT: movapd %xmm5, 144(%rax) +; SSE-NEXT: movapd %xmm0, 32(%rax) +; SSE-NEXT: movapd %xmm6, 80(%rax) +; SSE-NEXT: movapd %xmm4, 128(%rax) +; SSE-NEXT: movapd %xmm7, 176(%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i64_stride6_vf4: ; AVX1: # %bb.0: ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovaps (%rdi), %ymm0 -; AVX1-NEXT: vmovaps (%rdx), %ymm1 -; AVX1-NEXT: vmovaps (%r8), %ymm2 -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] -; AVX1-NEXT: vmovaps 16(%rdi), %xmm3 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0] -; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm1[4,5,6,7] -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3] -; AVX1-NEXT: vmovaps 16(%rdx), %xmm3 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],mem[1] -; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0,1,2,3],ymm2[4,5,6,7] -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] -; AVX1-NEXT: vmovaps 16(%r8), %xmm3 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0] -; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm3[0,1,2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vmovaps 
(%rcx), %xmm3 -; AVX1-NEXT: vmovaps (%rdx), %xmm4 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm4[1],xmm3[1] -; AVX1-NEXT: vmovaps (%r9), %xmm6 -; AVX1-NEXT: vmovaps (%r8), %xmm7 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0] -; AVX1-NEXT: vmovaps (%rsi), %xmm7 -; AVX1-NEXT: vmovaps (%rdi), %xmm2 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm7[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0] -; AVX1-NEXT: vmovaps %xmm3, 16(%rax) -; AVX1-NEXT: vmovaps %xmm2, (%rax) -; AVX1-NEXT: vmovaps %xmm0, 48(%rax) -; AVX1-NEXT: vmovaps %xmm6, 32(%rax) -; AVX1-NEXT: vmovaps %xmm1, 80(%rax) -; AVX1-NEXT: vmovaps %xmm5, 64(%rax) -; AVX1-NEXT: vmovaps %ymm10, 128(%rax) -; AVX1-NEXT: vmovaps %ymm9, 160(%rax) -; AVX1-NEXT: vmovaps %ymm8, 96(%rax) +; AVX1-NEXT: vmovaps (%rdx), %ymm0 +; AVX1-NEXT: vmovaps (%rcx), %ymm1 +; AVX1-NEXT: vmovapd (%r8), %ymm2 +; AVX1-NEXT: vmovapd (%r9), %ymm3 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm4 = ymm2[0],ymm4[0],ymm2[2],ymm4[3] +; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] +; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm2[0],ymm5[1],ymm2[2,3] +; AVX1-NEXT: vinsertf128 $1, (%r9), %ymm2, %ymm6 +; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm2[0],ymm6[1],ymm2[2],ymm6[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3] +; AVX1-NEXT: vmovaps %ymm0, (%rax) +; AVX1-NEXT: vmovapd %ymm2, 160(%rax) +; AVX1-NEXT: vmovaps %ymm1, 96(%rax) +; AVX1-NEXT: vmovapd %ymm6, 64(%rax) +; AVX1-NEXT: vmovapd %ymm5, 32(%rax) +; AVX1-NEXT: vmovapd %ymm4, 128(%rax) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: store_i64_stride6_vf4: ; AVX2: # %bb.0: ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-NEXT: vmovaps (%rdi), %ymm0 -; AVX2-NEXT: vmovaps (%rsi), %ymm1 -; AVX2-NEXT: vmovaps (%rdx), %ymm2 -; AVX2-NEXT: vmovaps (%rcx), %ymm3 -; AVX2-NEXT: vmovaps (%r8), %ymm4 -; AVX2-NEXT: vmovaps (%r9), %ymm5 -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm7[2,3],ymm6[2,3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm4[1],ymm5[1],ymm4[3],ymm5[3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm2[2,3],ymm7[2,3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm5[0],ymm4[2],ymm5[2] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm1[2,3],ymm0[2,3] -; AVX2-NEXT: vmovaps (%rcx), %xmm1 -; AVX2-NEXT: vmovaps (%rdx), %xmm3 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1] -; AVX2-NEXT: vmovaps (%r9), %xmm5 -; AVX2-NEXT: vmovaps (%r8), %xmm7 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm5[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm5[0] -; AVX2-NEXT: vmovaps (%rsi), %xmm7 -; AVX2-NEXT: vmovaps (%rdi), %xmm2 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm7[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0] -; AVX2-NEXT: vmovaps %xmm1, 16(%rax) -; AVX2-NEXT: vmovaps %xmm2, (%rax) -; AVX2-NEXT: vmovaps %xmm0, 48(%rax) -; AVX2-NEXT: vmovaps %xmm5, 32(%rax) -; AVX2-NEXT: vmovaps %xmm6, 80(%rax) -; AVX2-NEXT: vmovaps %xmm4, 64(%rax) -; AVX2-NEXT: vmovaps %ymm10, 128(%rax) -; AVX2-NEXT: vmovaps %ymm9, 160(%rax) -; AVX2-NEXT: vmovaps %ymm8, 96(%rax) +; AVX2-NEXT: vmovaps (%rdx), 
%ymm0 +; AVX2-NEXT: vmovaps (%rcx), %ymm1 +; AVX2-NEXT: vmovaps (%r8), %ymm2 +; AVX2-NEXT: vmovaps (%r9), %xmm3 +; AVX2-NEXT: vmovddup {{.*#+}} xmm4 = xmm3[0,0] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1],ymm4[2,3],ymm2[4,5,6,7] +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3,4,5],ymm3[6,7] +; AVX2-NEXT: vbroadcastsd 16(%r9), %ymm5 +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm2[0,1],ymm5[2,3],ymm2[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],mem[6,7] +; AVX2-NEXT: vmovaps %ymm0, (%rax) +; AVX2-NEXT: vmovaps %ymm2, 160(%rax) +; AVX2-NEXT: vmovaps %ymm5, 128(%rax) +; AVX2-NEXT: vmovaps %ymm1, 96(%rax) +; AVX2-NEXT: vmovaps %ymm3, 64(%rax) +; AVX2-NEXT: vmovaps %ymm4, 32(%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: store_i64_stride6_vf4: ; AVX512: # %bb.0: ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512-NEXT: vmovdqa (%rdx), %ymm1 -; AVX512-NEXT: vmovdqa (%r8), %ymm2 -; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1 -; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,4,8,12,u,u,1,5> +; AVX512-NEXT: vmovdqa (%rdx), %ymm0 +; AVX512-NEXT: vmovdqa (%r8), %ymm1 +; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm0, %zmm0 +; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm1, %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,12,6,7] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,9,13,4,5,6,7] ; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 -; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,8,12,6,7] -; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <1,5,9,13,u,u,2,6> -; AVX512-NEXT: vpermi2q %zmm2, %zmm1, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,10,14,6,7] -; AVX512-NEXT: vpermi2q %zmm0, %zmm3, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <2,6,11,15,u,u,3,7> -; AVX512-NEXT: vpermi2q %zmm0, %zmm2, %zmm3 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,11,15,6,7] -; AVX512-NEXT: vpermi2q %zmm1, %zmm3, %zmm0 -; AVX512-NEXT: vmovdqu64 %zmm0, 128(%rax) -; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rax) -; AVX512-NEXT: vmovdqu64 %zmm4, (%rax) +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [10,14,2,3,4,5,11,15] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rax) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rax) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <4 x i64>, <4 x i64>* %in.vecptr0, align 32 @@ -299,359 +236,146 @@ define void @store_i64_stride6_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr1, <8 x i64>* %in.vecptr2, <8 x i64>* %in.vecptr3, <8 x i64>* %in.vecptr4, <8 x i64>* %in.vecptr5, <48 x i64>* %out.vec) nounwind { ; SSE-LABEL: store_i64_stride6_vf8: ; SSE: # %bb.0: -; SSE-NEXT: subq $24, %rsp -; SSE-NEXT: movaps (%rdi), %xmm7 -; SSE-NEXT: movaps 16(%rdi), %xmm10 -; SSE-NEXT: movaps 32(%rdi), %xmm15 -; SSE-NEXT: movaps (%rsi), %xmm11 -; SSE-NEXT: movaps 16(%rsi), %xmm4 -; SSE-NEXT: movaps 32(%rsi), %xmm8 -; SSE-NEXT: movaps (%rdx), %xmm12 -; SSE-NEXT: movaps 16(%rdx), %xmm9 -; SSE-NEXT: movaps 32(%rdx), %xmm14 -; SSE-NEXT: movaps (%rcx), %xmm3 -; SSE-NEXT: movaps 16(%rcx), %xmm6 -; SSE-NEXT: movaps (%r8), %xmm5 -; SSE-NEXT: movaps 16(%r8), %xmm13 -; SSE-NEXT: movaps (%r9), %xmm0 -; SSE-NEXT: movaps 16(%r9), %xmm1 -; SSE-NEXT: movaps %xmm7, %xmm2 -; SSE-NEXT: movlhps {{.*#+}} xmm2 = 
xmm2[0],xmm11[0] -; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill -; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm11[1] -; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm12, %xmm7 -; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm3[0] -; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm3[1] -; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm5, %xmm3 -; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0] -; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1] -; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm10, %xmm11 -; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm4[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm4[1] -; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm9, %xmm0 -; SSE-NEXT: movlhps {{.*#+}} xmm9 = xmm9[0],xmm6[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1] -; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps %xmm13, %xmm10 -; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm1[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm1[1] -; SSE-NEXT: movaps %xmm15, %xmm0 -; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm8[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1] -; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movaps 32(%rcx), %xmm1 -; SSE-NEXT: movaps %xmm14, %xmm12 -; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm1[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm1[1] -; SSE-NEXT: movaps 32(%r8), %xmm5 -; SSE-NEXT: movaps 32(%r9), %xmm0 -; SSE-NEXT: movaps %xmm5, %xmm8 -; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1] -; SSE-NEXT: movaps 48(%rdi), %xmm6 -; SSE-NEXT: movaps 48(%rsi), %xmm3 -; SSE-NEXT: movaps %xmm6, %xmm7 -; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm3[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm3[1] -; SSE-NEXT: movaps 48(%rdx), %xmm3 -; SSE-NEXT: movaps 48(%rcx), %xmm2 -; SSE-NEXT: movaps %xmm3, %xmm1 -; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1] -; SSE-NEXT: movaps 48(%r8), %xmm2 -; SSE-NEXT: movaps 48(%r9), %xmm4 -; SSE-NEXT: movaps %xmm2, %xmm0 -; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0] -; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1] ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movaps %xmm2, 368(%rax) -; SSE-NEXT: movaps %xmm3, 352(%rax) -; SSE-NEXT: movaps %xmm6, 336(%rax) -; SSE-NEXT: movaps %xmm0, 320(%rax) -; SSE-NEXT: movaps %xmm1, 304(%rax) -; SSE-NEXT: movaps %xmm7, 288(%rax) -; SSE-NEXT: movaps %xmm5, 272(%rax) -; SSE-NEXT: movaps %xmm14, 256(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 240(%rax) -; SSE-NEXT: movaps %xmm8, 224(%rax) -; SSE-NEXT: movaps %xmm12, 208(%rax) -; SSE-NEXT: movaps %xmm15, 192(%rax) -; SSE-NEXT: movaps %xmm13, 176(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 160(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 144(%rax) -; SSE-NEXT: movaps %xmm10, 128(%rax) -; SSE-NEXT: movaps %xmm9, 112(%rax) -; SSE-NEXT: movaps %xmm11, 96(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: 
movaps %xmm0, 80(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 64(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 48(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 32(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 16(%rax) -; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, (%rax) -; SSE-NEXT: addq $24, %rsp +; SSE-NEXT: movapd (%r8), %xmm7 +; SSE-NEXT: movapd 16(%r8), %xmm0 +; SSE-NEXT: movapd 32(%r8), %xmm1 +; SSE-NEXT: movapd 48(%r8), %xmm5 +; SSE-NEXT: movapd (%r9), %xmm4 +; SSE-NEXT: movapd 16(%r9), %xmm3 +; SSE-NEXT: movapd 32(%r9), %xmm2 +; SSE-NEXT: movapd 48(%r9), %xmm9 +; SSE-NEXT: movapd %xmm7, %xmm8 +; SSE-NEXT: movapd %xmm7, (%rax) +; SSE-NEXT: movapd %xmm7, 64(%rax) +; SSE-NEXT: movapd %xmm7, 192(%rax) +; SSE-NEXT: movapd %xmm7, 256(%rax) +; SSE-NEXT: unpcklpd {{.*#+}} xmm7 = xmm7[0],xmm9[0] +; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm5[0],xmm9[1] +; SSE-NEXT: movapd %xmm1, %xmm6 +; SSE-NEXT: movapd %xmm1, 96(%rax) +; SSE-NEXT: movapd %xmm1, 160(%rax) +; SSE-NEXT: movapd %xmm1, 288(%rax) +; SSE-NEXT: movapd %xmm1, 352(%rax) +; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1] +; SSE-NEXT: unpcklpd {{.*#+}} xmm8 = xmm8[0],xmm3[0] +; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm5[0],xmm3[1] +; SSE-NEXT: unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm4[0] +; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1] +; SSE-NEXT: movapd %xmm0, 16(%rax) +; SSE-NEXT: movapd %xmm5, 48(%rax) +; SSE-NEXT: movapd %xmm5, 112(%rax) +; SSE-NEXT: movapd %xmm0, 144(%rax) +; SSE-NEXT: movapd %xmm0, 208(%rax) +; SSE-NEXT: movapd %xmm5, 240(%rax) +; SSE-NEXT: movapd %xmm5, 304(%rax) +; SSE-NEXT: movapd %xmm0, 336(%rax) +; SSE-NEXT: movapd %xmm6, 32(%rax) +; SSE-NEXT: movapd %xmm4, 80(%rax) +; SSE-NEXT: movapd %xmm8, 128(%rax) +; SSE-NEXT: movapd %xmm3, 176(%rax) +; SSE-NEXT: movapd %xmm1, 224(%rax) +; SSE-NEXT: movapd %xmm2, 272(%rax) +; SSE-NEXT: movapd %xmm7, 320(%rax) +; SSE-NEXT: movapd %xmm9, 368(%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i64_stride6_vf8: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps (%rdi), %ymm4 -; AVX1-NEXT: vmovaps 32(%rdi), %ymm1 -; AVX1-NEXT: vmovaps (%rdx), %ymm5 -; AVX1-NEXT: vmovaps 32(%rdx), %ymm2 -; AVX1-NEXT: vmovaps (%r8), %ymm3 -; AVX1-NEXT: vmovaps 32(%r8), %ymm0 -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] -; AVX1-NEXT: vmovaps 48(%rdx), %xmm6 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm6[1],mem[1] -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] -; AVX1-NEXT: vmovaps 48(%r8), %xmm6 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0] -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm1[4,5,6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2] -; AVX1-NEXT: vmovaps 48(%rdi), %xmm6 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0] -; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm2[4,5,6,7] -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3] -; AVX1-NEXT: vmovaps 16(%rdx), %xmm6 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm6[1],mem[1] -; 
AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm6[0,1,2,3],ymm3[4,5,6,7] -; AVX1-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],mem[1],ymm4[3],mem[3] -; AVX1-NEXT: vmovaps 16(%r8), %xmm6 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0] -; AVX1-NEXT: vblendps {{.*#+}} ymm12 = ymm6[0,1,2,3],ymm4[4,5,6,7] -; AVX1-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],mem[0],ymm5[2],mem[2] -; AVX1-NEXT: vmovaps 16(%rdi), %xmm6 -; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0] -; AVX1-NEXT: vblendps {{.*#+}} ymm13 = ymm6[0,1,2,3],ymm5[4,5,6,7] -; AVX1-NEXT: vmovaps 32(%rcx), %xmm6 -; AVX1-NEXT: vmovaps 32(%rdx), %xmm7 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm14 = xmm7[1],xmm6[1] -; AVX1-NEXT: vmovaps (%r9), %xmm1 -; AVX1-NEXT: vmovaps 32(%r9), %xmm2 -; AVX1-NEXT: vmovaps 32(%r8), %xmm3 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm15 = xmm3[1],xmm2[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm10 = xmm3[0],xmm2[0] -; AVX1-NEXT: vmovaps (%rsi), %xmm3 -; AVX1-NEXT: vmovaps 32(%rsi), %xmm5 -; AVX1-NEXT: vmovaps 32(%rdi), %xmm0 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm5[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm5[0] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm6[0] -; AVX1-NEXT: vmovaps (%r8), %xmm6 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm6[1],xmm1[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm1[0] -; AVX1-NEXT: vmovaps (%rdi), %xmm6 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm3[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm6[0],xmm3[0] -; AVX1-NEXT: vmovaps (%rcx), %xmm6 -; AVX1-NEXT: vmovaps (%rdx), %xmm0 -; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm6[1] -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0] ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovaps %xmm0, 16(%rax) -; AVX1-NEXT: vmovaps %xmm3, (%rax) -; AVX1-NEXT: vmovaps %xmm1, 48(%rax) -; AVX1-NEXT: vmovaps %xmm4, 32(%rax) -; AVX1-NEXT: vmovaps %xmm7, 80(%rax) -; AVX1-NEXT: vmovaps %xmm2, 64(%rax) -; AVX1-NEXT: vmovaps %xmm5, 208(%rax) -; AVX1-NEXT: vmovaps %xmm8, 192(%rax) -; AVX1-NEXT: vmovaps %xmm9, 240(%rax) -; AVX1-NEXT: vmovaps %xmm10, 224(%rax) -; AVX1-NEXT: vmovaps %xmm15, 272(%rax) -; AVX1-NEXT: vmovaps %xmm14, 256(%rax) -; AVX1-NEXT: vmovaps %ymm13, 96(%rax) -; AVX1-NEXT: vmovaps %ymm12, 128(%rax) -; AVX1-NEXT: vmovaps %ymm11, 160(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 288(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 320(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 352(%rax) +; AVX1-NEXT: vmovapd (%r8), %ymm0 +; AVX1-NEXT: vmovapd 32(%r8), %ymm1 +; AVX1-NEXT: vmovapd (%r9), %ymm2 +; AVX1-NEXT: vmovapd 32(%r9), %ymm3 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm0[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm4 = ymm0[0],ymm4[0],ymm0[2],ymm4[3] +; AVX1-NEXT: vinsertf128 $1, 32(%r9), %ymm0, %ymm5 +; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm0[0],ymm5[1],ymm0[2],ymm5[3] +; AVX1-NEXT: vmovddup {{.*#+}} xmm6 = mem[0,0] +; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm1[0],ymm6[1],ymm1[2,3] +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm2[2,3],ymm0[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm7 = ymm0[0],ymm7[0],ymm0[2],ymm7[3] +; AVX1-NEXT: vinsertf128 $1, (%r9), %ymm0, %ymm8 +; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm0[0],ymm8[1],ymm0[2],ymm8[3] +; AVX1-NEXT: vmovddup {{.*#+}} xmm9 = mem[0,0] +; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm1[0],ymm9[1],ymm1[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3] +; AVX1-NEXT: vblendpd {{.*#+}} 
ymm2 = ymm1[0,1,2],ymm2[3] +; AVX1-NEXT: vmovapd %ymm1, 96(%rax) +; AVX1-NEXT: vmovapd %ymm2, 160(%rax) +; AVX1-NEXT: vmovapd %ymm0, 192(%rax) +; AVX1-NEXT: vmovapd %ymm1, 288(%rax) +; AVX1-NEXT: vmovapd %ymm3, 352(%rax) +; AVX1-NEXT: vmovapd %ymm0, (%rax) +; AVX1-NEXT: vmovapd %ymm9, 32(%rax) +; AVX1-NEXT: vmovapd %ymm8, 64(%rax) +; AVX1-NEXT: vmovapd %ymm7, 128(%rax) +; AVX1-NEXT: vmovapd %ymm6, 224(%rax) +; AVX1-NEXT: vmovapd %ymm5, 256(%rax) +; AVX1-NEXT: vmovapd %ymm4, 320(%rax) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: store_i64_stride6_vf8: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovaps (%rdi), %ymm5 -; AVX2-NEXT: vmovaps 32(%rdi), %ymm2 -; AVX2-NEXT: vmovaps (%rsi), %ymm6 -; AVX2-NEXT: vmovaps 32(%rsi), %ymm3 -; AVX2-NEXT: vmovaps (%rdx), %ymm7 -; AVX2-NEXT: vmovaps 32(%rdx), %ymm4 -; AVX2-NEXT: vmovaps (%rcx), %ymm8 -; AVX2-NEXT: vmovaps 32(%rcx), %ymm9 -; AVX2-NEXT: vmovaps (%r8), %ymm10 -; AVX2-NEXT: vmovaps 32(%r8), %ymm1 -; AVX2-NEXT: vmovaps (%r9), %ymm11 -; AVX2-NEXT: vmovaps 32(%r9), %ymm12 -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm12[1],ymm1[3],ymm12[3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm13 = ymm4[1],ymm9[1],ymm4[3],ymm9[3] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3] -; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm13 = ymm2[1],ymm3[1],ymm2[3],ymm3[3] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[2],ymm12[2] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm13[2,3] -; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm9[0],ymm4[2],ymm9[2] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3],ymm4[2,3] -; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm10[1],ymm11[1],ymm10[3],ymm11[3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm8[1],ymm7[3],ymm8[3] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm4[2,3],ymm3[2,3] -; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm6[1],ymm5[3],ymm6[3] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm10[0],ymm11[0],ymm10[2],ymm11[2] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm9[2,3],ymm4[2,3] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2] -; AVX2-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2] -; AVX2-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm5[2,3],ymm7[2,3] -; AVX2-NEXT: vmovaps 32(%rcx), %xmm6 -; AVX2-NEXT: vmovaps 32(%rdx), %xmm7 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm6[1] -; AVX2-NEXT: vmovaps (%r9), %xmm1 -; AVX2-NEXT: vmovaps 32(%r9), %xmm2 -; AVX2-NEXT: vmovaps (%r8), %xmm3 -; AVX2-NEXT: vmovaps 32(%r8), %xmm4 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm4[1],xmm2[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm10 = xmm4[0],xmm2[0] -; AVX2-NEXT: vmovaps 32(%rsi), %xmm4 -; AVX2-NEXT: vmovaps 32(%rdi), %xmm0 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm4[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm4[0] -; AVX2-NEXT: vmovaps (%rsi), %xmm4 -; AVX2-NEXT: vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0] -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm3[1],xmm1[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm5 = xmm3[0],xmm1[0] -; AVX2-NEXT: vmovaps (%rdi), %xmm3 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1] -; AVX2-NEXT: vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0] -; AVX2-NEXT: vmovaps (%rcx), %xmm4 -; AVX2-NEXT: vmovaps (%rdx), %xmm0 -; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm4[1] -; 
AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0] ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-NEXT: vmovaps %xmm0, 16(%rax) -; AVX2-NEXT: vmovaps %xmm3, (%rax) -; AVX2-NEXT: vmovaps %xmm1, 48(%rax) -; AVX2-NEXT: vmovaps %xmm5, 32(%rax) -; AVX2-NEXT: vmovaps %xmm7, 80(%rax) -; AVX2-NEXT: vmovaps %xmm2, 64(%rax) -; AVX2-NEXT: vmovaps %xmm6, 208(%rax) -; AVX2-NEXT: vmovaps %xmm8, 192(%rax) -; AVX2-NEXT: vmovaps %xmm9, 240(%rax) -; AVX2-NEXT: vmovaps %xmm10, 224(%rax) -; AVX2-NEXT: vmovaps %xmm11, 272(%rax) -; AVX2-NEXT: vmovaps %xmm12, 256(%rax) -; AVX2-NEXT: vmovaps %ymm13, 96(%rax) -; AVX2-NEXT: vmovaps %ymm14, 128(%rax) -; AVX2-NEXT: vmovaps %ymm15, 160(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 288(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 320(%rax) -; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX2-NEXT: vmovaps %ymm0, 352(%rax) +; AVX2-NEXT: vmovaps (%r8), %ymm0 +; AVX2-NEXT: vmovaps 32(%r8), %ymm1 +; AVX2-NEXT: vmovaps (%r9), %xmm2 +; AVX2-NEXT: vmovaps 32(%r9), %xmm3 +; AVX2-NEXT: vmovddup {{.*#+}} xmm4 = xmm3[0,0] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1],ymm4[2,3],ymm1[4,5,6,7] +; AVX2-NEXT: vmovddup {{.*#+}} xmm5 = xmm2[0,0] +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1],ymm5[2,3],ymm1[4,5,6,7] +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],ymm3[6,7] +; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6,7] +; AVX2-NEXT: vbroadcastsd 48(%r9), %ymm6 +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3,4,5],mem[6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3,4,5],mem[6,7] +; AVX2-NEXT: vbroadcastsd 16(%r9), %ymm9 +; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1],ymm9[2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vmovaps %ymm1, 96(%rax) +; AVX2-NEXT: vmovaps %ymm9, 128(%rax) +; AVX2-NEXT: vmovaps %ymm8, 160(%rax) +; AVX2-NEXT: vmovaps %ymm0, 192(%rax) +; AVX2-NEXT: vmovaps %ymm1, 288(%rax) +; AVX2-NEXT: vmovaps %ymm7, 352(%rax) +; AVX2-NEXT: vmovaps %ymm6, 320(%rax) +; AVX2-NEXT: vmovaps %ymm0, (%rax) +; AVX2-NEXT: vmovaps %ymm2, 64(%rax) +; AVX2-NEXT: vmovaps %ymm3, 256(%rax) +; AVX2-NEXT: vmovaps %ymm5, 32(%rax) +; AVX2-NEXT: vmovaps %ymm4, 224(%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: store_i64_stride6_vf8: ; AVX512: # %bb.0: -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm5 -; AVX512-NEXT: vmovdqu64 (%rsi), %zmm6 -; AVX512-NEXT: vmovdqu64 (%rdx), %zmm3 -; AVX512-NEXT: vmovdqu64 (%rcx), %zmm4 -; AVX512-NEXT: vmovdqu64 (%r8), %zmm8 -; AVX512-NEXT: vmovdqu64 (%r9), %zmm2 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [1,9,2,10,1,9,2,10] -; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm0 -; AVX512-NEXT: vmovdqa (%r8), %xmm7 -; AVX512-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm7[1],mem[1] -; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7 -; AVX512-NEXT: movb $12, %al -; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: vinserti64x4 $0, %ymm7, %zmm0, %zmm0 {%k1} -; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [2,10,2,10,2,10,2,10] -; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm7 -; AVX512-NEXT: movb $48, %al -; AVX512-NEXT: kmovd %eax, %k2 -; AVX512-NEXT: vmovdqa64 %zmm7, %zmm0 {%k2} -; 
AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,8,1,9,0,8,1,9] -; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm7 -; AVX512-NEXT: vmovdqa (%rdx), %xmm1 -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] -; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 -; AVX512-NEXT: vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k1} -; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,8,0,8,0,8,0,8] -; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm2, %zmm8, %zmm1 -; AVX512-NEXT: vmovdqa64 %zmm1, %zmm7 {%k2} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [2,10,3,11,2,10,3,11] -; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm2, %zmm8, %zmm1 -; AVX512-NEXT: vmovdqa (%rdi), %ymm9 -; AVX512-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],mem[1],ymm9[3],mem[3] -; AVX512-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1} -; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm9 = [3,11,3,11,3,11,3,11] -; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm9 -; AVX512-NEXT: vmovdqa64 %zmm9, %zmm1 {%k2} -; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm9 = [7,15,7,15,7,15,7,15] -; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm9 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [6,14,7,15,6,14,7,15] -; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm2, %zmm8, %zmm10 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = -; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm11 -; AVX512-NEXT: vmovdqa64 %zmm11, %zmm10 {%k1} -; AVX512-NEXT: vmovdqa64 %zmm9, %zmm10 {%k2} -; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14] -; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm9 -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [5,13,6,14,5,13,6,14] -; AVX512-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm11 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = -; AVX512-NEXT: vpermi2q %zmm2, %zmm8, %zmm12 -; AVX512-NEXT: vmovdqa64 %zmm12, %zmm11 {%k1} -; AVX512-NEXT: vmovdqa64 %zmm9, %zmm11 {%k2} -; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [4,12,5,13,4,12,5,13] -; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3] -; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm9 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = -; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm5 -; AVX512-NEXT: vmovdqa64 %zmm5, %zmm9 {%k1} -; AVX512-NEXT: vpunpcklqdq {{.*#+}} zmm9 {%k2} = zmm8[0],zmm2[0],zmm8[2],zmm2[2],zmm8[4],zmm2[4],zmm8[6],zmm2[6] -; AVX512-NEXT: vmovdqu64 %zmm11, 256(%r10) -; AVX512-NEXT: vmovdqu64 %zmm10, 320(%r10) -; AVX512-NEXT: vmovdqu64 %zmm1, 128(%r10) -; AVX512-NEXT: vmovdqu64 %zmm9, 192(%r10) -; AVX512-NEXT: vmovdqu64 %zmm7, (%r10) -; AVX512-NEXT: vmovdqu64 %zmm0, 64(%r10) +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax +; AVX512-NEXT: vmovdqu64 (%r8), %zmm0 +; AVX512-NEXT: vmovdqu64 (%r9), %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,8,6,7] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,9,4,5,6,7] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,10,2,3,4,5,6,11] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,12,6,7] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,13,4,5,6,7] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm6 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,14,2,3,4,5,6,15] +; 
AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm7 +; AVX512-NEXT: vmovdqu64 %zmm7, 320(%rax) +; AVX512-NEXT: vmovdqu64 %zmm6, 256(%rax) +; AVX512-NEXT: vmovdqu64 %zmm5, 192(%rax) +; AVX512-NEXT: vmovdqu64 %zmm4, 128(%rax) +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%rax) +; AVX512-NEXT: vmovdqu64 %zmm2, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <8 x i64>, <8 x i64>* %in.vecptr0, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll @@ -227,64 +227,33 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr1, <16 x i8>* %in.vecptr2, <48 x i8>* %out.vec) nounwind { ; SSE-LABEL: store_i8_stride3_vf16: ; SSE: # %bb.0: -; SSE-NEXT: movdqa (%rdi), %xmm5 -; SSE-NEXT: movdqa (%rsi), %xmm1 -; SSE-NEXT: movdqa (%rdx), %xmm8 -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255] -; SSE-NEXT: movdqa %xmm0, %xmm4 -; SSE-NEXT: pandn %xmm2, %xmm4 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3] -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,4,5,5,6] -; SSE-NEXT: pand %xmm0, %xmm6 -; SSE-NEXT: por %xmm4, %xmm6 +; SSE-NEXT: movdqa (%rsi), %xmm0 +; SSE-NEXT: movdqa (%rdx), %xmm1 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255] -; SSE-NEXT: pand %xmm2, %xmm6 -; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,4,4,6,5] -; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: pandn %xmm7, %xmm4 -; SSE-NEXT: por %xmm6, %xmm4 -; SSE-NEXT: movdqa %xmm1, %xmm6 -; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,1,2] -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,2,4,5,6,7] -; SSE-NEXT: movdqa %xmm0, %xmm7 -; SSE-NEXT: pandn %xmm6, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,1,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,0,2,1,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,6,6] -; SSE-NEXT: pand %xmm0, %xmm6 -; SSE-NEXT: por %xmm7, %xmm6 -; SSE-NEXT: pand %xmm2, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6] -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: pandn %xmm7, %xmm3 -; SSE-NEXT: por %xmm6, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3] +; SSE-NEXT: movdqa %xmm0, %xmm3 +; SSE-NEXT: pand %xmm2, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,0,1] +; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,0,4,5,6,7] +; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6] +; SSE-NEXT: pandn %xmm4, %xmm2 +; SSE-NEXT: por %xmm3, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0] +; SSE-NEXT: movdqa %xmm0, %xmm4 +; SSE-NEXT: pand %xmm3, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw 
{{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm8[2,3,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7] -; SSE-NEXT: pand %xmm0, %xmm6 -; SSE-NEXT: pandn %xmm5, %xmm0 -; SSE-NEXT: por %xmm6, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7] -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: por %xmm0, %xmm2 -; SSE-NEXT: movdqa %xmm2, 32(%rcx) -; SSE-NEXT: movdqa %xmm3, (%rcx) +; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,7,7] +; SSE-NEXT: pandn %xmm5, %xmm3 +; SSE-NEXT: por %xmm4, %xmm3 +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255] +; SSE-NEXT: pand %xmm4, %xmm0 +; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,3,3,4,5,6,7] +; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] +; SSE-NEXT: pandn %xmm1, %xmm4 +; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: movdqa %xmm4, 16(%rcx) +; SSE-NEXT: movdqa %xmm3, 32(%rcx) +; SSE-NEXT: movdqa %xmm2, (%rcx) ; SSE-NEXT: retq ; ; AVX-LABEL: store_i8_stride3_vf16: @@ -347,122 +316,60 @@ define void @store_i8_stride3_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr1, <32 x i8>* %in.vecptr2, <96 x i8>* %out.vec) nounwind { ; SSE-LABEL: store_i8_stride3_vf32: ; SSE: # %bb.0: -; SSE-NEXT: movdqa (%rdi), %xmm9 -; SSE-NEXT: movdqa 16(%rdi), %xmm11 -; SSE-NEXT: movdqa (%rsi), %xmm13 -; SSE-NEXT: movdqa 16(%rsi), %xmm7 +; SSE-NEXT: movdqa (%rsi), %xmm4 +; SSE-NEXT: movdqa 16(%rsi), %xmm10 ; SSE-NEXT: movdqa (%rdx), %xmm8 -; SSE-NEXT: movdqa 16(%rdx), %xmm10 -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[2,1,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: pandn %xmm2, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,1,2,3] -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,6] -; SSE-NEXT: pand %xmm0, %xmm2 -; SSE-NEXT: por %xmm3, %xmm2 -; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255] -; SSE-NEXT: pand %xmm4, %xmm2 -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm11[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,4,6,5] -; SSE-NEXT: movdqa %xmm4, %xmm12 -; SSE-NEXT: pandn %xmm6, %xmm12 -; SSE-NEXT: por %xmm2, %xmm12 -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[2,1,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm0, %xmm6 -; SSE-NEXT: pandn %xmm2, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[2,1,2,3] -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,6] -; SSE-NEXT: pand %xmm0, %xmm2 -; SSE-NEXT: por %xmm6, %xmm2 -; SSE-NEXT: pand %xmm4, %xmm2 -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,4,4,6,5] -; SSE-NEXT: movdqa %xmm4, %xmm6 -; SSE-NEXT: pandn %xmm1, %xmm6 -; SSE-NEXT: por %xmm2, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,3,2,3] -; 
SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7] +; SSE-NEXT: movdqa 16(%rdx), %xmm9 +; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0] +; SSE-NEXT: movdqa %xmm10, %xmm5 +; SSE-NEXT: pand %xmm0, %xmm5 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[2,3,2,3] +; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,1,2,2,4,5,6,7] +; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,6,5,7,7] ; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[2,3,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7] +; SSE-NEXT: pandn %xmm6, %xmm2 +; SSE-NEXT: por %xmm5, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255] +; SSE-NEXT: movdqa %xmm10, %xmm1 +; SSE-NEXT: pand %xmm5, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,1,0,1] +; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,0,0,0,4,5,6,7] +; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,5,5,6,6] +; SSE-NEXT: movdqa %xmm5, %xmm6 +; SSE-NEXT: pandn %xmm7, %xmm6 +; SSE-NEXT: por %xmm1, %xmm6 +; SSE-NEXT: movdqa %xmm4, %xmm1 ; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: por %xmm2, %xmm1 -; SSE-NEXT: pand %xmm4, %xmm1 -; SSE-NEXT: movdqa %xmm7, %xmm2 -; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,2,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,5,6,6,7] -; SSE-NEXT: movdqa %xmm4, %xmm2 -; SSE-NEXT: pandn %xmm3, %xmm2 -; SSE-NEXT: por %xmm1, %xmm2 -; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,1,1,2] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,2,4,5,6,7] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: pandn %xmm1, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,1,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,1,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,5,5,6,6] -; SSE-NEXT: pand %xmm0, %xmm7 -; SSE-NEXT: por %xmm3, %xmm7 -; SSE-NEXT: pand %xmm4, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,1,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,5,5,6,6] +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,3,2,3] +; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,1,2,2,4,5,6,7] +; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,7,7] +; SSE-NEXT: pandn %xmm7, %xmm0 +; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: movdqa %xmm4, %xmm1 -; SSE-NEXT: pandn %xmm3, %xmm1 -; SSE-NEXT: por %xmm7, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[2,3,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7] -; SSE-NEXT: movdqa %xmm0, %xmm7 -; SSE-NEXT: pandn %xmm3, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[2,3,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7] -; SSE-NEXT: pand %xmm0, %xmm3 -; SSE-NEXT: por %xmm7, %xmm3 -; SSE-NEXT: pand %xmm4, %xmm3 -; SSE-NEXT: movdqa %xmm13, %xmm7 -; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15] -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,2,2,3] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = 
xmm7[0,1,2,3,5,6,6,7] -; SSE-NEXT: movdqa %xmm4, %xmm5 -; SSE-NEXT: pandn %xmm7, %xmm5 -; SSE-NEXT: por %xmm3, %xmm5 -; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,1,1,2] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm9[0,1,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,2,1,4,5,6,7] +; SSE-NEXT: pand %xmm5, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1] +; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6] -; SSE-NEXT: pand %xmm0, %xmm7 -; SSE-NEXT: pandn %xmm3, %xmm0 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: pand %xmm4, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,1,0,1] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,6] -; SSE-NEXT: pandn %xmm3, %xmm4 -; SSE-NEXT: por %xmm0, %xmm4 -; SSE-NEXT: movdqa %xmm4, (%rcx) -; SSE-NEXT: movdqa %xmm5, 32(%rcx) -; SSE-NEXT: movdqa %xmm1, 48(%rcx) +; SSE-NEXT: pandn %xmm7, %xmm5 +; SSE-NEXT: por %xmm1, %xmm5 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255] +; SSE-NEXT: pand %xmm1, %xmm4 +; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm9[2,1,3,3,4,5,6,7] +; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] +; SSE-NEXT: movdqa %xmm1, %xmm3 +; SSE-NEXT: pandn %xmm7, %xmm3 +; SSE-NEXT: por %xmm4, %xmm3 +; SSE-NEXT: pand %xmm1, %xmm10 +; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm8[2,1,3,3,4,5,6,7] +; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] +; SSE-NEXT: pandn %xmm4, %xmm1 +; SSE-NEXT: por %xmm10, %xmm1 +; SSE-NEXT: movdqa %xmm1, 16(%rcx) +; SSE-NEXT: movdqa %xmm3, 64(%rcx) +; SSE-NEXT: movdqa %xmm5, (%rcx) +; SSE-NEXT: movdqa %xmm0, 32(%rcx) +; SSE-NEXT: movdqa %xmm6, 48(%rcx) ; SSE-NEXT: movdqa %xmm2, 80(%rcx) -; SSE-NEXT: movdqa %xmm6, 16(%rcx) -; SSE-NEXT: movdqa %xmm12, 64(%rcx) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i8_stride3_vf32: diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll @@ -174,14 +174,14 @@ ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE-NEXT: packuswb %xmm1, %xmm1 -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,65535,65535,65535,65535,65535] +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535] +; SSE-NEXT: pand %xmm2, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,1,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7] ; SSE-NEXT: packuswb %xmm0, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: por %xmm0, %xmm2 +; SSE-NEXT: pandn %xmm0, %xmm2 +; SSE-NEXT: por %xmm1, %xmm2 ; SSE-NEXT: movq %xmm2, 16(%rax) ; SSE-NEXT: movdqa %xmm3, (%rax) ; SSE-NEXT: retq @@ -294,53 +294,40 @@ ; SSE-LABEL: store_i8_stride6_vf8: ; SSE: # %bb.0: ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero -; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm1 = 
mem[0],zero -; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE-NEXT: movq {{.*#+}} xmm4 = mem[0],zero +; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero -; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,0,65535,65535,0] -; SSE-NEXT: pand %xmm1, %xmm3 -; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[1,0,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,1] -; SSE-NEXT: movdqa %xmm1, %xmm6 -; SSE-NEXT: pandn %xmm5, %xmm6 -; SSE-NEXT: por %xmm3, %xmm6 -; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535] -; SSE-NEXT: pand %xmm3, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,0,0] -; SSE-NEXT: movdqa %xmm3, %xmm5 -; SSE-NEXT: pandn %xmm7, %xmm5 +; SSE-NEXT: movq {{.*#+}} xmm3 = mem[0],zero +; SSE-NEXT: pxor %xmm4, %xmm4 +; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE-NEXT: movdqa %xmm2, %xmm4 +; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; SSE-NEXT: packuswb %xmm4, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,2,3] +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535] +; SSE-NEXT: movdqa %xmm0, %xmm6 +; SSE-NEXT: pand %xmm5, %xmm6 +; SSE-NEXT: pandn %xmm4, %xmm5 ; SSE-NEXT: por %xmm6, %xmm5 -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,2,3,3] -; SSE-NEXT: movdqa %xmm1, %xmm7 -; SSE-NEXT: pandn %xmm6, %xmm7 -; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm0[0,1,2,3,5,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,2,2,3] -; SSE-NEXT: pand %xmm1, %xmm6 -; SSE-NEXT: por %xmm7, %xmm6 -; SSE-NEXT: pand %xmm3, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm2[3,3,3,3] -; SSE-NEXT: movdqa %xmm3, %xmm7 -; SSE-NEXT: pandn %xmm8, %xmm7 -; SSE-NEXT: por %xmm6, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,2] -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,2] -; SSE-NEXT: pand %xmm1, %xmm4 -; SSE-NEXT: pandn %xmm2, %xmm1 -; SSE-NEXT: por %xmm4, %xmm1 -; SSE-NEXT: pand %xmm3, %xmm1 -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: pandn %xmm0, %xmm3 -; SSE-NEXT: por %xmm1, %xmm3 -; SSE-NEXT: movdqa %xmm3, 16(%rax) -; SSE-NEXT: movdqa %xmm7, 32(%rax) +; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; SSE-NEXT: packuswb %xmm2, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,3,3] +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,0,65535,65535,0] +; SSE-NEXT: movdqa %xmm0, %xmm4 +; SSE-NEXT: pand %xmm3, %xmm4 +; SSE-NEXT: pandn %xmm2, %xmm3 +; SSE-NEXT: por %xmm4, %xmm3 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,0,65535] +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: 
pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm0, %xmm2 +; SSE-NEXT: movdqa %xmm2, 16(%rax) +; SSE-NEXT: movdqa %xmm3, 32(%rax) ; SSE-NEXT: movdqa %xmm5, (%rax) ; SSE-NEXT: retq ; @@ -497,238 +484,124 @@ ; SSE-LABEL: store_i8_stride6_vf16: ; SSE: # %bb.0: ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movdqa (%rdi), %xmm6 -; SSE-NEXT: movdqa (%rsi), %xmm10 -; SSE-NEXT: movdqa (%rdx), %xmm5 -; SSE-NEXT: movdqa (%rcx), %xmm12 -; SSE-NEXT: movdqa (%r8), %xmm7 -; SSE-NEXT: movdqa (%r9), %xmm14 -; SSE-NEXT: movdqa %xmm6, %xmm11 -; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,0,1,1] -; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,0,65535,65535,0] -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: movdqa %xmm5, %xmm8 -; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3],xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[1,0,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: pandn %xmm2, %xmm3 -; SSE-NEXT: por %xmm1, %xmm3 -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535] -; SSE-NEXT: pand %xmm2, %xmm3 -; SSE-NEXT: movdqa %xmm7, %xmm13 -; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3],xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,0,0,0] -; SSE-NEXT: movdqa %xmm2, %xmm9 -; SSE-NEXT: pandn %xmm1, %xmm9 -; SSE-NEXT: por %xmm3, %xmm9 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,2,3,3] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: pandn %xmm1, %xmm3 -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3] -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: por %xmm3, %xmm1 -; SSE-NEXT: pand %xmm2, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[3,3,3,3] -; SSE-NEXT: movdqa %xmm2, %xmm15 -; SSE-NEXT: pandn %xmm4, %xmm15 -; SSE-NEXT: por %xmm1, %xmm15 -; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,2,3,3] -; SSE-NEXT: movdqa %xmm0, %xmm4 -; SSE-NEXT: pandn %xmm1, %xmm4 -; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm12[8],xmm5[9],xmm12[9],xmm5[10],xmm12[10],xmm5[11],xmm12[11],xmm5[12],xmm12[12],xmm5[13],xmm12[13],xmm5[14],xmm12[14],xmm5[15],xmm12[15] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,5,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3] -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: por %xmm4, %xmm1 -; SSE-NEXT: pand %xmm2, %xmm1 -; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm10[8],xmm6[9],xmm10[9],xmm6[10],xmm10[10],xmm6[11],xmm10[11],xmm6[12],xmm10[12],xmm6[13],xmm10[13],xmm6[14],xmm10[14],xmm6[15],xmm10[15] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[3,3,3,3] -; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: pandn %xmm3, %xmm4 -; SSE-NEXT: por %xmm1, %xmm4 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,0,1,1] -; SSE-NEXT: pand %xmm0, %xmm1 -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[1,0,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,1,0,1] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: pandn %xmm10, %xmm3 -; 
SSE-NEXT: por %xmm1, %xmm3 -; SSE-NEXT: pand %xmm2, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,0,0] -; SSE-NEXT: movdqa %xmm2, %xmm1 -; SSE-NEXT: pandn %xmm10, %xmm1 -; SSE-NEXT: por %xmm3, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,2,2] -; SSE-NEXT: movdqa %xmm0, %xmm6 -; SSE-NEXT: pandn %xmm3, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,2,2] -; SSE-NEXT: pand %xmm0, %xmm3 -; SSE-NEXT: por %xmm6, %xmm3 -; SSE-NEXT: pand %xmm2, %xmm3 -; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm2, %xmm6 -; SSE-NEXT: pandn %xmm5, %xmm6 -; SSE-NEXT: por %xmm3, %xmm6 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,1,2,2] -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[1,1,2,2] -; SSE-NEXT: pand %xmm0, %xmm5 -; SSE-NEXT: pandn %xmm3, %xmm0 -; SSE-NEXT: por %xmm5, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] -; SSE-NEXT: pandn %xmm3, %xmm2 -; SSE-NEXT: por %xmm0, %xmm2 -; SSE-NEXT: movdqa %xmm2, 16(%rax) -; SSE-NEXT: movdqa %xmm6, 64(%rax) +; SSE-NEXT: movdqa (%r8), %xmm12 +; SSE-NEXT: movdqa (%r9), %xmm8 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255] +; SSE-NEXT: movdqa %xmm12, %xmm10 +; SSE-NEXT: pand %xmm1, %xmm10 +; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,0,0,0] +; SSE-NEXT: movdqa %xmm1, %xmm11 +; SSE-NEXT: pandn %xmm4, %xmm11 +; SSE-NEXT: por %xmm10, %xmm11 +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0] +; SSE-NEXT: movdqa %xmm12, %xmm7 +; SSE-NEXT: pand %xmm4, %xmm7 +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[2,2,3,3] +; SSE-NEXT: movdqa %xmm4, %xmm5 +; SSE-NEXT: pandn %xmm6, %xmm5 +; SSE-NEXT: por %xmm7, %xmm5 +; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255] +; SSE-NEXT: pand %xmm6, %xmm12 +; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm8[8],xmm3[9],xmm8[9],xmm3[10],xmm8[10],xmm3[11],xmm8[11],xmm3[12],xmm8[12],xmm3[13],xmm8[13],xmm3[14],xmm8[14],xmm3[15],xmm8[15] +; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[1,1,2,2] +; SSE-NEXT: movdqa %xmm6, %xmm2 +; SSE-NEXT: pandn %xmm8, %xmm2 +; SSE-NEXT: por %xmm12, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3] +; SSE-NEXT: pandn %xmm0, %xmm4 +; SSE-NEXT: por %xmm7, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,0,0] +; SSE-NEXT: pandn %xmm0, %xmm1 +; SSE-NEXT: por %xmm10, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,2,2] +; SSE-NEXT: pandn %xmm0, %xmm6 +; SSE-NEXT: por %xmm12, %xmm6 +; SSE-NEXT: movdqa %xmm6, 16(%rax) ; SSE-NEXT: movdqa %xmm1, 48(%rax) ; SSE-NEXT: movdqa %xmm4, 80(%rax) -; SSE-NEXT: movdqa %xmm15, 32(%rax) -; SSE-NEXT: movdqa %xmm9, (%rax) +; SSE-NEXT: movdqa %xmm2, 64(%rax) +; SSE-NEXT: movdqa %xmm5, 32(%rax) +; SSE-NEXT: movdqa %xmm11, (%rax) ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i8_stride6_vf16: ; AVX1: # %bb.0: ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovdqa (%rdi), %xmm8 -; AVX1-NEXT: vmovdqa (%rsi), %xmm9 -; AVX1-NEXT: vmovdqa (%rdx), %xmm10 -; AVX1-NEXT: vmovdqa (%rcx), %xmm4 -; AVX1-NEXT: vmovdqa (%r8), %xmm5 -; AVX1-NEXT: vmovdqa (%r9), %xmm6 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm7 = 
xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm7[1,1,2,2] -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7] -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm11 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[0,0,1,1] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[1,0,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5,6],xmm0[7] +; AVX1-NEXT: vmovdqa (%rdx), %xmm0 +; AVX1-NEXT: vmovdqa (%rcx), %xmm1 +; AVX1-NEXT: vmovdqa (%r8), %xmm2 +; AVX1-NEXT: vmovdqa (%r9), %xmm3 +; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,2,2] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm1[1,2],xmm5[3],xmm1[4,5],xmm5[6],xmm1[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm4[0,0,0,0] +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm0[0,1],xmm6[2],xmm0[3,4],xmm6[5],xmm0[6,7] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5 +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,0,0] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,0,0] -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm4[1,0,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4],xmm5[5],xmm6[6,7] -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm6[0,0,1,1] -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1,2],xmm0[3],xmm5[4,5],xmm0[6],xmm5[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6],xmm2[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm7[3,3,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: vpshufd 
{{.*#+}} xmm1 = xmm6[3,3,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,2,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,2,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm6[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,2] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4],xmm3[5],xmm2[6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 -; AVX1-NEXT: vmovaps %ymm1, 64(%rax) -; AVX1-NEXT: vmovaps %ymm0, 32(%rax) -; AVX1-NEXT: vmovaps %ymm11, (%rax) +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1],xmm3[2],xmm1[3,4],xmm3[5],xmm1[6,7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5,6],xmm4[7] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,2,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3],xmm4[4],xmm1[5,6],xmm4[7] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,2] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps %ymm0, 64(%rax) +; AVX1-NEXT: vmovaps %ymm3, 32(%rax) +; AVX1-NEXT: vmovaps %ymm5, (%rax) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-SLOW-LABEL: store_i8_stride6_vf16: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm1 -; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm2 -; AVX2-SLOW-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8,u,u],zero,zero,ymm3[1,9,u,u],zero,zero,ymm3[2,10,u,u],zero,zero,ymm3[19,27,u,u],zero,zero,ymm3[20,28,u,u],zero,zero -; AVX2-SLOW-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,ymm4[2,10],zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,ymm4[20,28],zero,zero,zero,zero,ymm4[21,29] -; AVX2-SLOW-NEXT: vpor %ymm3, %ymm4, %ymm3 -; AVX2-SLOW-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,8,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,19,27,u,u,u,u,20,28,u,u] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[5,13,u,u],zero,zero,ymm4[6,14,u,u],zero,zero,ymm4[7,15,u,u],zero,zero,ymm4[16,24,u,u],zero,zero,ymm4[17,25,u,u],zero,zero -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm1[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[5,13],zero,zero,zero,zero,ymm6[6,14],zero,zero,zero,zero,ymm6[7,15],zero,zero,zero,zero,ymm6[16,24],zero,zero,zero,zero,ymm6[17,25],zero,zero,zero,zero,ymm6[18,26] -; 
AVX2-SLOW-NEXT: vpor %ymm4, %ymm6, %ymm4 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u,18,26,u,u] -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm4, %ymm6, %ymm4 -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[3,11,u,u],zero,zero,ymm0[4,12,u,u],zero,zero,ymm0[5,13,u,u],zero,zero,ymm0[22,30,u,u],zero,zero,ymm0[23,31,u,u],zero,zero -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[2,10],zero,zero,zero,zero,ymm2[3,11],zero,zero,zero,zero,ymm2[4,12],zero,zero,zero,zero,ymm2[21,29],zero,zero,zero,zero,ymm2[22,30],zero,zero,zero,zero,ymm2[23,31] -; AVX2-SLOW-NEXT: vpor %ymm0, %ymm2, %ymm0 +; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm0 +; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm1 +; AVX2-SLOW-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm0 +; AVX2-SLOW-NEXT: vinserti128 $1, (%r9), %ymm1, %ymm1 +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm1[0,2,0,2] +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,8,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,19,27,u,u,u,u,20,28,u,u] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm2 +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,1,3] +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,5,13,u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm0, %ymm3, %ymm3 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31,u,u] -; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm1, %ymm0 +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,10,u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0] +; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm0 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 64(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm4, 32(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm3, 32(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm2, (%rax) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: store_i8_stride6_vf16: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm1 -; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm2 -; AVX2-FAST-NEXT: vinserti128 $1, (%rcx), %ymm1, %ymm1 -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,0,2] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8,u,u],zero,zero,ymm3[1,9,u,u],zero,zero,ymm3[2,10,u,u],zero,zero,ymm3[19,27,u,u],zero,zero,ymm3[20,28,u,u],zero,zero -; AVX2-FAST-NEXT: vinserti128 $1, (%rsi), %ymm0, %ymm0 -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,ymm4[2,10],zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,ymm4[20,28],zero,zero,zero,zero,ymm4[21,29] -; AVX2-FAST-NEXT: vpor %ymm3, %ymm4, %ymm3 -; AVX2-FAST-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2 -; 
AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,0,2] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,0,8,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,19,27,u,u,u,u,20,28,u,u] -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3 -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm2[0,2,1,3] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[5,13,u,u],zero,zero,ymm4[6,14,u,u],zero,zero,ymm4[7,15,u,u],zero,zero,ymm4[16,24,u,u],zero,zero,ymm4[17,25,u,u],zero,zero -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm1[0,2,1,3] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[5,13],zero,zero,zero,zero,ymm6[6,14],zero,zero,zero,zero,ymm6[7,15],zero,zero,zero,zero,ymm6[16,24],zero,zero,zero,zero,ymm6[17,25],zero,zero,zero,zero,ymm6[18,26] -; AVX2-FAST-NEXT: vpor %ymm4, %ymm6, %ymm4 -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm0[0,2,1,3] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u,18,26,u,u] -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm4, %ymm6, %ymm4 -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[3,11,u,u],zero,zero,ymm0[4,12,u,u],zero,zero,ymm0[5,13,u,u],zero,zero,ymm0[22,30,u,u],zero,zero,ymm0[23,31,u,u],zero,zero -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[1,3,1,3] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[2,10],zero,zero,zero,zero,ymm2[3,11],zero,zero,zero,zero,ymm2[4,12],zero,zero,zero,zero,ymm2[21,29],zero,zero,zero,zero,ymm2[22,30],zero,zero,zero,zero,ymm2[23,31] -; AVX2-FAST-NEXT: vpor %ymm0, %ymm2, %ymm0 +; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm0 +; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm1 +; AVX2-FAST-NEXT: vinserti128 $1, (%rcx), %ymm0, %ymm0 +; AVX2-FAST-NEXT: vinserti128 $1, (%r9), %ymm1, %ymm1 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm1[0,2,0,2] +; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,8,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,19,27,u,u,u,u,20,28,u,u] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm2 +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,1,3] +; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,5,13,u,u,u,u,6,14,u,u,u,u,7,15,u,u,u,u,16,24,u,u,u,u,17,25,u,u,u,u] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm0, %ymm3, %ymm3 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31,u,u] -; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm1, %ymm0 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,10,u,u,u,u,3,11,u,u,u,u,4,12,u,u,u,u,21,29,u,u,u,u,22,30,u,u,u,u,23,31] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0] +; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm0 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 64(%rax) -; AVX2-FAST-NEXT: vmovdqa %ymm4, 32(%rax) -; AVX2-FAST-NEXT: vmovdqa %ymm3, (%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm3, 32(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm2, (%rax) ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; @@ -799,711 +672,271 @@ define void 
@store_i8_stride6_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr1, <32 x i8>* %in.vecptr2, <32 x i8>* %in.vecptr3, <32 x i8>* %in.vecptr4, <32 x i8>* %in.vecptr5, <192 x i8>* %out.vec) nounwind { ; SSE-LABEL: store_i8_stride6_vf32: ; SSE: # %bb.0: -; SSE-NEXT: subq $56, %rsp -; SSE-NEXT: movdqa 16(%rdi), %xmm10 -; SSE-NEXT: movdqa 16(%rsi), %xmm8 -; SSE-NEXT: movdqa 16(%rdx), %xmm9 -; SSE-NEXT: movdqa 16(%rcx), %xmm4 -; SSE-NEXT: movdqa (%r8), %xmm13 -; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa 16(%r8), %xmm12 -; SSE-NEXT: movdqa 16(%r9), %xmm5 -; SSE-NEXT: movdqa %xmm12, %xmm0 -; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3] -; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,0,65535,65535,0] -; SSE-NEXT: movdqa %xmm3, %xmm0 -; SSE-NEXT: pandn %xmm1, %xmm0 -; SSE-NEXT: movdqa %xmm9, %xmm6 -; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,5,6,7,7] -; SSE-NEXT: movdqa %xmm6, %xmm7 -; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3] -; SSE-NEXT: pand %xmm3, %xmm1 -; SSE-NEXT: por %xmm0, %xmm1 -; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,0,65535,65535,0,65535,65535] -; SSE-NEXT: pand %xmm15, %xmm1 -; SSE-NEXT: movdqa %xmm10, %xmm11 -; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm11[3,3,3,3] -; SSE-NEXT: movdqa %xmm15, %xmm0 -; SSE-NEXT: pandn %xmm6, %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,0,1,1] -; SSE-NEXT: pand %xmm3, %xmm1 -; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[1,0,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,1] -; SSE-NEXT: movdqa %xmm3, %xmm7 -; SSE-NEXT: pandn %xmm6, %xmm7 -; SSE-NEXT: por %xmm1, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,0,0] -; SSE-NEXT: movdqa %xmm15, %xmm0 -; SSE-NEXT: pandn %xmm1, %xmm0 -; SSE-NEXT: movdqa (%r9), %xmm6 -; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pand %xmm15, %xmm7 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,2,3,3] -; SSE-NEXT: movdqa %xmm3, %xmm5 -; SSE-NEXT: pandn %xmm1, %xmm5 -; SSE-NEXT: movdqa %xmm9, %xmm2 -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,5,6,7,7] +; SSE-NEXT: movdqa (%r8), %xmm12 +; SSE-NEXT: movdqa 16(%r8), %xmm14 +; SSE-NEXT: movdqa (%r9), %xmm8 +; SSE-NEXT: movdqa 16(%r9), %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0] +; 
SSE-NEXT: movdqa %xmm14, %xmm13 +; SSE-NEXT: pand %xmm2, %xmm13 +; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3] ; SSE-NEXT: movdqa %xmm2, %xmm4 -; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3] -; SSE-NEXT: pand %xmm3, %xmm1 -; SSE-NEXT: por %xmm5, %xmm1 -; SSE-NEXT: pand %xmm15, %xmm1 -; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7] -; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[3,3,3,3] -; SSE-NEXT: movdqa %xmm15, %xmm0 -; SSE-NEXT: pandn %xmm2, %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,0,1,1] -; SSE-NEXT: pand %xmm3, %xmm1 -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[1,0,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1] -; SSE-NEXT: movdqa %xmm3, %xmm4 -; SSE-NEXT: pandn %xmm2, %xmm4 -; SSE-NEXT: por %xmm1, %xmm4 -; SSE-NEXT: pand %xmm15, %xmm4 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[0,0,0,0] -; SSE-NEXT: movdqa %xmm15, %xmm0 -; SSE-NEXT: pandn %xmm1, %xmm0 -; SSE-NEXT: por %xmm4, %xmm0 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm13, %xmm0 -; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3] -; SSE-NEXT: movdqa %xmm0, %xmm9 -; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE-NEXT: movdqa %xmm3, %xmm2 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: movdqa (%rdx), %xmm8 -; SSE-NEXT: movdqa (%rcx), %xmm7 -; SSE-NEXT: movdqa %xmm8, %xmm13 -; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm7[8],xmm13[9],xmm7[9],xmm13[10],xmm7[10],xmm13[11],xmm7[11],xmm13[12],xmm7[12],xmm13[13],xmm7[13],xmm13[14],xmm7[14],xmm13[15],xmm7[15] -; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,5,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,2,2,3] -; SSE-NEXT: pand %xmm3, %xmm6 -; SSE-NEXT: por %xmm2, %xmm6 -; SSE-NEXT: movdqa (%rdi), %xmm4 -; SSE-NEXT: movdqa (%rsi), %xmm10 -; SSE-NEXT: movdqa %xmm4, %xmm5 -; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm10[8],xmm5[9],xmm10[9],xmm5[10],xmm10[10],xmm5[11],xmm10[11],xmm5[12],xmm10[12],xmm5[13],xmm10[13],xmm5[14],xmm10[14],xmm5[15],xmm10[15] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3] -; SSE-NEXT: movdqa %xmm15, %xmm14 -; SSE-NEXT: pandn %xmm1, %xmm14 -; SSE-NEXT: pand %xmm15, %xmm6 -; SSE-NEXT: por %xmm6, %xmm14 -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm13[1,0,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] -; SSE-NEXT: movdqa %xmm3, %xmm0 -; SSE-NEXT: pandn %xmm1, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,0,1,1] -; SSE-NEXT: pand %xmm3, %xmm1 -; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,0,0,0] -; SSE-NEXT: movdqa %xmm15, %xmm9 -; SSE-NEXT: pandn %xmm1, %xmm9 -; SSE-NEXT: pand %xmm15, %xmm0 -; SSE-NEXT: por %xmm0, %xmm9 -; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload -; SSE-NEXT: punpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; SSE-NEXT: # xmm2 = 
xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3] -; SSE-NEXT: movdqa %xmm3, %xmm1 -; SSE-NEXT: pandn %xmm0, %xmm1 -; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,5,6,7,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,3] -; SSE-NEXT: pand %xmm3, %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 -; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7] -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3] -; SSE-NEXT: movdqa %xmm15, %xmm6 -; SSE-NEXT: pandn %xmm1, %xmm6 -; SSE-NEXT: pand %xmm15, %xmm0 -; SSE-NEXT: por %xmm0, %xmm6 -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[1,0,2,2,4,5,6,7] -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] -; SSE-NEXT: movdqa %xmm3, %xmm7 -; SSE-NEXT: pandn %xmm0, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,1,1] -; SSE-NEXT: pand %xmm3, %xmm0 -; SSE-NEXT: por %xmm0, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,0,0] -; SSE-NEXT: movdqa %xmm15, %xmm1 -; SSE-NEXT: pandn %xmm0, %xmm1 -; SSE-NEXT: pand %xmm15, %xmm7 -; SSE-NEXT: por %xmm7, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,2,2] -; SSE-NEXT: movdqa %xmm3, %xmm7 -; SSE-NEXT: pandn %xmm0, %xmm7 -; SSE-NEXT: pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload -; SSE-NEXT: # xmm0 = mem[1,1,2,2] -; SSE-NEXT: pand %xmm3, %xmm0 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: pshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload -; SSE-NEXT: # xmm7 = mem[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm15, %xmm11 -; SSE-NEXT: pandn %xmm7, %xmm11 -; SSE-NEXT: pand %xmm15, %xmm0 -; SSE-NEXT: por %xmm0, %xmm11 -; SSE-NEXT: pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload -; SSE-NEXT: # xmm0 = mem[1,1,2,2] -; SSE-NEXT: movdqa %xmm3, %xmm7 -; SSE-NEXT: pandn %xmm0, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,2,2] -; SSE-NEXT: pand %xmm3, %xmm0 -; SSE-NEXT: por %xmm7, %xmm0 -; SSE-NEXT: pshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload -; SSE-NEXT: # xmm7 = mem[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm15, %xmm12 -; SSE-NEXT: pandn %xmm7, %xmm12 -; SSE-NEXT: pand %xmm15, %xmm0 -; SSE-NEXT: por %xmm0, %xmm12 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,2] -; SSE-NEXT: movdqa %xmm3, %xmm5 -; SSE-NEXT: pandn %xmm0, %xmm5 -; SSE-NEXT: pshufd $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload -; SSE-NEXT: # xmm0 = mem[1,1,2,2] -; SSE-NEXT: pand %xmm3, %xmm0 -; SSE-NEXT: por %xmm5, %xmm0 -; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm13[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4] -; SSE-NEXT: movdqa %xmm15, %xmm7 -; SSE-NEXT: pandn %xmm5, %xmm7 -; SSE-NEXT: pand %xmm15, %xmm0 -; SSE-NEXT: por %xmm0, %xmm7 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,2,2] -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,2,2] -; SSE-NEXT: pand %xmm3, %xmm4 +; SSE-NEXT: pandn %xmm3, %xmm4 +; SSE-NEXT: por %xmm13, %xmm4 +; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = 
xmm1[1,1,2,2] +; SSE-NEXT: movdqa %xmm5, %xmm4 +; SSE-NEXT: pandn %xmm3, %xmm4 +; SSE-NEXT: movdqa %xmm4, %xmm7 +; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255] +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] +; SSE-NEXT: movdqa %xmm6, %xmm11 +; SSE-NEXT: pandn %xmm1, %xmm11 +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3] +; SSE-NEXT: movdqa %xmm2, %xmm15 +; SSE-NEXT: pandn %xmm3, %xmm15 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,2] +; SSE-NEXT: movdqa %xmm5, %xmm10 +; SSE-NEXT: pandn %xmm3, %xmm10 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,0,0] +; SSE-NEXT: movdqa %xmm6, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 +; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15] +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3] +; SSE-NEXT: movdqa %xmm2, %xmm3 ; SSE-NEXT: pandn %xmm0, %xmm3 -; SSE-NEXT: por %xmm4, %xmm3 -; SSE-NEXT: pand %xmm15, %xmm3 -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[3,3,3,3,4,5,6,7] -; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; SSE-NEXT: pandn %xmm0, %xmm15 -; SSE-NEXT: por %xmm3, %xmm15 +; SSE-NEXT: por %xmm13, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm4[1,1,2,2] +; SSE-NEXT: movdqa %xmm5, %xmm0 +; SSE-NEXT: pandn %xmm13, %xmm0 +; SSE-NEXT: movdqa %xmm12, %xmm13 +; SSE-NEXT: pand %xmm5, %xmm13 +; SSE-NEXT: por %xmm13, %xmm7 +; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: por %xmm13, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm4[0,0,0,0] +; SSE-NEXT: movdqa %xmm6, %xmm4 +; SSE-NEXT: pandn %xmm13, %xmm4 +; SSE-NEXT: movdqa %xmm14, %xmm13 +; SSE-NEXT: pand %xmm6, %xmm13 +; SSE-NEXT: por %xmm13, %xmm11 +; SSE-NEXT: por %xmm13, %xmm4 +; SSE-NEXT: movdqa %xmm12, %xmm13 +; SSE-NEXT: pand %xmm2, %xmm13 +; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm8[2,2,3,3] +; SSE-NEXT: pandn %xmm9, %xmm2 +; SSE-NEXT: por %xmm13, %xmm15 +; SSE-NEXT: pand %xmm5, %xmm14 +; SSE-NEXT: por %xmm13, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[1,1,2,2] +; SSE-NEXT: pandn %xmm7, %xmm5 +; SSE-NEXT: por %xmm14, %xmm10 +; SSE-NEXT: por %xmm14, %xmm5 +; SSE-NEXT: pand %xmm6, %xmm12 +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,0,0,0] +; SSE-NEXT: pandn %xmm7, %xmm6 +; SSE-NEXT: por %xmm12, %xmm1 +; SSE-NEXT: por %xmm12, %xmm6 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax -; SSE-NEXT: movdqa %xmm15, 16(%rax) -; SSE-NEXT: movdqa %xmm7, 64(%rax) -; SSE-NEXT: movdqa %xmm12, 112(%rax) -; SSE-NEXT: movdqa %xmm11, 160(%rax) -; SSE-NEXT: movdqa %xmm1, (%rax) -; SSE-NEXT: movdqa %xmm6, 32(%rax) -; SSE-NEXT: movdqa %xmm9, 48(%rax) -; SSE-NEXT: movdqa %xmm14, 80(%rax) +; SSE-NEXT: movdqa %xmm6, (%rax) +; SSE-NEXT: movdqa %xmm5, 16(%rax) +; SSE-NEXT: movdqa %xmm2, 32(%rax) +; SSE-NEXT: movdqa %xmm4, 48(%rax) +; SSE-NEXT: movdqa %xmm0, 64(%rax) +; SSE-NEXT: movdqa %xmm3, 80(%rax) +; SSE-NEXT: movdqa %xmm1, 96(%rax) +; SSE-NEXT: movdqa %xmm10, 112(%rax) +; SSE-NEXT: movdqa %xmm15, 128(%rax) +; SSE-NEXT: movdqa %xmm11, 144(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 96(%rax) -; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload -; SSE-NEXT: movaps %xmm0, 128(%rax) -; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 
# 16-byte Reload -; SSE-NEXT: movaps %xmm0, 144(%rax) +; SSE-NEXT: movaps %xmm0, 160(%rax) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 176(%rax) -; SSE-NEXT: addq $56, %rsp ; SSE-NEXT: retq ; ; AVX1-LABEL: store_i8_stride6_vf32: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm0 -; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 -; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm13[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm13[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535] -; AVX1-NEXT: vandps %ymm0, %ymm9, %ymm0 -; AVX1-NEXT: vmovdqa 16(%rcx), %xmm1 -; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vmovdqa 16(%rdx), %xmm2 -; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm6[1,0,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vandnps %ymm1, %ymm9, %ymm1 -; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps {{.*#+}} ymm14 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535] -; AVX1-NEXT: vandps %ymm0, %ymm14, %ymm0 -; AVX1-NEXT: vmovdqa (%r9), %xmm2 -; AVX1-NEXT: vmovdqa 16(%r9), %xmm4 -; AVX1-NEXT: vmovdqa (%r8), %xmm7 -; AVX1-NEXT: vmovdqa 16(%r8), %xmm3 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,0,0,0] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm8, %ymm5 -; AVX1-NEXT: vandnps %ymm5, %ymm14, %ymm5 -; AVX1-NEXT: vorps %ymm5, %ymm0, %ymm0 -; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3],xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm8[2,2,3,3] -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm15 = xmm7[8],xmm2[8],xmm7[9],xmm2[9],xmm7[10],xmm2[10],xmm7[11],xmm2[11],xmm7[12],xmm2[12],xmm7[13],xmm2[13],xmm7[14],xmm2[14],xmm7[15],xmm2[15] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm15[0,0,0,0] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vmovdqa (%rcx), %xmm5 -; AVX1-NEXT: vmovdqa (%rdx), %xmm7 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7] -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm7[8],xmm5[8],xmm7[9],xmm5[9],xmm7[10],xmm5[10],xmm7[11],xmm5[11],xmm7[12],xmm5[12],xmm7[13],xmm5[13],xmm7[14],xmm5[14],xmm7[15],xmm5[15] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm2[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = 
xmm7[2,2,2,3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm11 = xmm10[1,0,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm11[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5 -; AVX1-NEXT: vandnps %ymm0, %ymm9, %ymm0 -; AVX1-NEXT: vandps %ymm5, %ymm9, %ymm5 -; AVX1-NEXT: vorps %ymm0, %ymm5, %ymm11 -; AVX1-NEXT: vmovdqa (%rsi), %xmm7 -; AVX1-NEXT: vmovdqa (%rdi), %xmm5 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7] -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[3,3,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm12 = xmm7[0,0,1,1] -; AVX1-NEXT: vinsertf128 $1, %xmm12, %ymm5, %ymm5 -; AVX1-NEXT: vandps %ymm14, %ymm11, %ymm11 -; AVX1-NEXT: vandnps %ymm5, %ymm14, %ymm5 -; AVX1-NEXT: vorps %ymm5, %ymm11, %ymm12 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm4[0,0,0,0] -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm3 -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; AVX1-NEXT: vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm11 # 16-byte Folded Reload -; AVX1-NEXT: # xmm11 = xmm1[8],mem[8],xmm1[9],mem[9],xmm1[10],mem[10],xmm1[11],mem[11],xmm1[12],mem[12],xmm1[13],mem[13],xmm1[14],mem[14],xmm1[15],mem[15] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,3] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm11[1,0,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5 -; AVX1-NEXT: vandnps %ymm3, %ymm9, %ymm3 -; AVX1-NEXT: vandps %ymm5, %ymm9, %ymm5 -; AVX1-NEXT: vorps %ymm3, %ymm5, %ymm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm13[3,3,3,3] -; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; AVX1-NEXT: vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload -; AVX1-NEXT: # xmm6 = xmm1[8],mem[8],xmm1[9],mem[9],xmm1[10],mem[10],xmm1[11],mem[11],xmm1[12],mem[12],xmm1[13],mem[13],xmm1[14],mem[14],xmm1[15],mem[15] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[0,0,1,1] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm5, %ymm1 -; AVX1-NEXT: vandps %ymm3, %ymm14, %ymm3 -; AVX1-NEXT: vandnps %ymm1, %ymm14, %ymm1 -; AVX1-NEXT: vorps %ymm1, %ymm3, %ymm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,1,1] -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm2[1,0,2,2,4,5,6,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vandps %ymm0, %ymm9, %ymm0 -; AVX1-NEXT: vandnps %ymm1, %ymm9, %ymm1 -; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm8[0,0,0,0] -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm8[1,1,2,2] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vandps %ymm0, %ymm14, %ymm0 -; AVX1-NEXT: vandnps %ymm1, %ymm14, %ymm1 -; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm7[1,1,2,2] -; 
AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm7[3,3,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm15[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm15[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1 -; AVX1-NEXT: vandnps %ymm0, %ymm9, %ymm0 -; AVX1-NEXT: vandps %ymm1, %ymm9, %ymm1 -; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm10[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm10[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1 -; AVX1-NEXT: vandps %ymm0, %ymm14, %ymm0 -; AVX1-NEXT: vandnps %ymm1, %ymm14, %ymm1 -; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm6[3,3,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1 -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,2,2] -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4 -; AVX1-NEXT: vandnps %ymm1, %ymm9, %ymm1 -; AVX1-NEXT: vandps %ymm4, %ymm9, %ymm4 -; AVX1-NEXT: vorps %ymm1, %ymm4, %ymm1 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm11[3,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm5 = xmm11[0,1,2,3,5,6,7,7] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,3] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4 -; AVX1-NEXT: vandps %ymm1, %ymm14, %ymm1 -; AVX1-NEXT: vandnps %ymm4, %ymm14, %ymm4 -; AVX1-NEXT: vorps %ymm4, %ymm1, %ymm1 +; AVX1-NEXT: vmovdqa (%r9), %xmm1 +; AVX1-NEXT: vmovdqa 16(%r9), %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [128,128,128,128,128,8,128,128,128,128,128,9,128,128,128,128] +; AVX1-NEXT: vpshufb %xmm8, %xmm2, %xmm0 +; AVX1-NEXT: vmovdqa (%r8), %xmm5 +; AVX1-NEXT: vmovdqa 16(%r8), %xmm6 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,2,3,4,128,6,7,8,9,10,128,12,13,14,15] +; AVX1-NEXT: vpshufb %xmm11, %xmm6, %xmm14 +; AVX1-NEXT: vpor %xmm0, %xmm14, %xmm9 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,5,128,128,128,128,128,6,128,128,128,128,128,7] +; AVX1-NEXT: vpshufb %xmm10, %xmm2, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,2,128,4,5,6,7,8,128,10,11,12,13,14,128] +; AVX1-NEXT: vpshufb %xmm0, %xmm5, %xmm7 +; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm12 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,13,128,128,128,128,128,14,128,128,128,128,128,15] +; AVX1-NEXT: vpshufb %xmm0, %xmm6, %xmm0 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,2,3,4,5,6,128,8,9,10,11,12,128,14,15] +; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm6 +; AVX1-NEXT: vpshufb %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpshufb %xmm11, %xmm5, %xmm5 +; AVX1-NEXT: vpshufb %xmm13, %xmm2, %xmm4 +; AVX1-NEXT: vpor %xmm4, %xmm0, %xmm11 +; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm4 +; AVX1-NEXT: vpor %xmm4, %xmm14, %xmm8 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm14 = [128,10,128,128,128,128,128,11,128,128,128,128,128,12,128,128] +; AVX1-NEXT: vpshufb %xmm10, %xmm1, %xmm4 +; AVX1-NEXT: vpor %xmm4, %xmm7, %xmm15 +; AVX1-NEXT: vpshufb %xmm14, %xmm2, %xmm7 +; AVX1-NEXT: vpor %xmm7, %xmm3, %xmm10 +; AVX1-NEXT: vpshufb %xmm13, %xmm1, %xmm7 +; AVX1-NEXT: vpor %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [128,2,128,128,128,128,128,3,128,128,128,128,128,4,128,128] +; AVX1-NEXT: vpshufb %xmm14, %xmm1, %xmm4 +; AVX1-NEXT: vpor %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpshufb %xmm7, %xmm1, %xmm4 +; AVX1-NEXT: vpor %xmm4, %xmm6, %xmm4 +; AVX1-NEXT: vpshufb %xmm7, 
%xmm2, %xmm7 +; AVX1-NEXT: vpor %xmm7, %xmm6, %xmm6 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [128,128,128,128,128,0,128,128,128,128,128,1,128,128,128,128] +; AVX1-NEXT: vpshufb %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpshufb %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpor %xmm1, %xmm5, %xmm1 +; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2 ; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: vmovaps %ymm1, 160(%rax) -; AVX1-NEXT: vmovaps %ymm0, 64(%rax) -; AVX1-NEXT: vmovaps %ymm2, (%rax) -; AVX1-NEXT: vmovaps %ymm3, 128(%rax) -; AVX1-NEXT: vmovaps %ymm12, 32(%rax) -; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovaps %ymm0, 96(%rax) -; AVX1-NEXT: vzeroupper +; AVX1-NEXT: vmovdqa %xmm2, 96(%rax) +; AVX1-NEXT: vmovdqa %xmm6, 112(%rax) +; AVX1-NEXT: vmovdqa %xmm3, 64(%rax) +; AVX1-NEXT: vmovdqa %xmm0, 80(%rax) +; AVX1-NEXT: vmovdqa %xmm15, 32(%rax) +; AVX1-NEXT: vmovdqa %xmm8, 48(%rax) +; AVX1-NEXT: vmovdqa %xmm1, (%rax) +; AVX1-NEXT: vmovdqa %xmm4, 16(%rax) +; AVX1-NEXT: vmovdqa %xmm10, 160(%rax) +; AVX1-NEXT: vmovdqa %xmm11, 176(%rax) +; AVX1-NEXT: vmovdqa %xmm12, 128(%rax) +; AVX1-NEXT: vmovdqa %xmm9, 144(%rax) ; AVX1-NEXT: retq ; ; AVX2-SLOW-LABEL: store_i8_stride6_vf32: ; AVX2-SLOW: # %bb.0: -; AVX2-SLOW-NEXT: subq $24, %rsp -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm8 -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm11 -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm4 -; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm12 -; AVX2-SLOW-NEXT: vmovdqa (%r9), %ymm9 -; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm2 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm0 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u> -; AVX2-SLOW-NEXT: vpshufb %xmm0, %xmm2, %xmm1 -; AVX2-SLOW-NEXT: vmovdqa %xmm2, %xmm15 -; AVX2-SLOW-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm6 -; AVX2-SLOW-NEXT: vpshufb %xmm0, %xmm6, %xmm0 -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,0,1] -; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm7 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u> -; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm7, %xmm3 -; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm5 -; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm5, %xmm2 -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm1, %ymm2, %ymm1 -; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm1 -; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm2 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u> -; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm4, %ymm13 -; AVX2-SLOW-NEXT: vmovdqa %ymm4, %ymm0 -; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm11, %ymm3 -; AVX2-SLOW-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm13[0],ymm3[1],ymm13[1],ymm3[2],ymm13[2],ymm3[3],ymm13[3],ymm3[4],ymm13[4],ymm3[5],ymm13[5],ymm3[6],ymm13[6],ymm3[7],ymm13[7],ymm3[16],ymm13[16],ymm3[17],ymm13[17],ymm3[18],ymm13[18],ymm3[19],ymm13[19],ymm3[20],ymm13[20],ymm3[21],ymm13[21],ymm3[22],ymm13[22],ymm3[23],ymm13[23] -; 
AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u> -; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm9, %ymm14 -; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm12, %ymm13 -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[4],ymm14[4],ymm13[5],ymm14[5],ymm13[6],ymm14[6],ymm13[7],ymm14[7],ymm13[16],ymm14[16],ymm13[17],ymm14[17],ymm13[18],ymm14[18],ymm13[19],ymm14[19],ymm13[20],ymm14[20],ymm13[21],ymm14[21],ymm13[22],ymm14[22],ymm13[23],ymm14[23] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm3, %ymm13, %ymm3 -; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,3,2,1,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,5] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1] -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3],xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm3, %ymm4, %ymm3 -; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm15 -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm8[0],ymm15[0],ymm8[1],ymm15[1],ymm8[2],ymm15[2],ymm8[3],ymm15[3],ymm8[4],ymm15[4],ymm8[5],ymm15[5],ymm8[6],ymm15[6],ymm8[7],ymm15[7],ymm8[16],ymm15[16],ymm8[17],ymm15[17],ymm8[18],ymm15[18],ymm8[19],ymm15[19],ymm8[20],ymm15[20],ymm8[21],ymm15[21],ymm8[22],ymm15[22],ymm8[23],ymm15[23] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm3 = ymm3[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm11[0],ymm0[0],ymm11[1],ymm0[1],ymm11[2],ymm0[2],ymm11[3],ymm0[3],ymm11[4],ymm0[4],ymm11[5],ymm0[5],ymm11[6],ymm0[6],ymm11[7],ymm0[7],ymm11[16],ymm0[16],ymm11[17],ymm0[17],ymm11[18],ymm0[18],ymm11[19],ymm0[19],ymm11[20],ymm0[20],ymm11[21],ymm0[21],ymm11[22],ymm0[22],ymm11[23],ymm0[23] -; AVX2-SLOW-NEXT: vmovdqa %ymm0, %ymm11 -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm4 = ymm4[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm3, %ymm4, %ymm3 -; AVX2-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1] -; AVX2-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15] 
-; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm4, %ymm13, %ymm4 -; AVX2-SLOW-NEXT: vpunpckhbw {{.*#+}} ymm13 = ymm12[8],ymm9[8],ymm12[9],ymm9[9],ymm12[10],ymm9[10],ymm12[11],ymm9[11],ymm12[12],ymm9[12],ymm12[13],ymm9[13],ymm12[14],ymm9[14],ymm12[15],ymm9[15],ymm12[24],ymm9[24],ymm12[25],ymm9[25],ymm12[26],ymm9[26],ymm12[27],ymm9[27],ymm12[28],ymm9[28],ymm12[29],ymm9[29],ymm12[30],ymm9[30],ymm12[31],ymm9[31] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3] -; AVX2-SLOW-NEXT: vpunpckhbw {{.*#+}} ymm14 = ymm8[8],ymm15[8],ymm8[9],ymm15[9],ymm8[10],ymm15[10],ymm8[11],ymm15[11],ymm8[12],ymm15[12],ymm8[13],ymm15[13],ymm8[14],ymm15[14],ymm8[15],ymm15[15],ymm8[24],ymm15[24],ymm8[25],ymm15[25],ymm8[26],ymm15[26],ymm8[27],ymm15[27],ymm8[28],ymm15[28],ymm8[29],ymm15[29],ymm8[30],ymm15[30],ymm8[31],ymm15[31] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm13, %ymm14, %ymm10 -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm13 = -; AVX2-SLOW-NEXT: vpshufb %xmm13, %xmm1, %xmm1 -; AVX2-SLOW-NEXT: vpshufb %xmm13, %xmm2, %xmm2 -; AVX2-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1] -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] -; AVX2-SLOW-NEXT: vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u> -; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm15, %ymm14 -; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm8, %ymm8 -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} ymm8 = ymm8[0],ymm14[0],ymm8[1],ymm14[1],ymm8[2],ymm14[2],ymm8[3],ymm14[3],ymm8[4],ymm14[4],ymm8[5],ymm14[5],ymm8[6],ymm14[6],ymm8[7],ymm14[7],ymm8[16],ymm14[16],ymm8[17],ymm14[17],ymm8[18],ymm14[18],ymm8[19],ymm14[19],ymm8[20],ymm14[20],ymm8[21],ymm14[21],ymm8[22],ymm14[22],ymm8[23],ymm14[23] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1] -; AVX2-SLOW-NEXT: vpblendvb %ymm2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload -; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm12[0],ymm9[0],ymm12[1],ymm9[1],ymm12[2],ymm9[2],ymm12[3],ymm9[3],ymm12[4],ymm9[4],ymm12[5],ymm9[5],ymm12[6],ymm9[6],ymm12[7],ymm9[7],ymm12[16],ymm9[16],ymm12[17],ymm9[17],ymm12[18],ymm9[18],ymm12[19],ymm9[19],ymm12[20],ymm9[20],ymm12[21],ymm9[21],ymm12[22],ymm9[22],ymm12[23],ymm9[23] -; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm5[2,1,0,3,4,5,6,7,10,9,8,11,12,13,14,15] -; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12] -; 
AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm2, %ymm3, %ymm5, %ymm3 -; AVX2-SLOW-NEXT: vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm5 # 16-byte Folded Reload -; AVX2-SLOW-NEXT: # xmm5 = xmm6[8],mem[8],xmm6[9],mem[9],xmm6[10],mem[10],xmm6[11],mem[11],xmm6[12],mem[12],xmm6[13],mem[13],xmm6[14],mem[14],xmm6[15],mem[15] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1] -; AVX2-SLOW-NEXT: vpblendvb %ymm2, %ymm4, %ymm5, %ymm4 -; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload -; AVX2-SLOW-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm5[8],ymm11[8],ymm5[9],ymm11[9],ymm5[10],ymm11[10],ymm5[11],ymm11[11],ymm5[12],ymm11[12],ymm5[13],ymm11[13],ymm5[14],ymm11[14],ymm5[15],ymm11[15],ymm5[24],ymm11[24],ymm5[25],ymm11[25],ymm5[26],ymm11[26],ymm5[27],ymm11[27],ymm5[28],ymm11[28],ymm5[29],ymm11[29],ymm5[30],ymm11[30],ymm5[31],ymm11[31] -; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31] -; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-SLOW-NEXT: vpblendvb %ymm2, %ymm10, %ymm5, %ymm2 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-SLOW-NEXT: vmovdqa %ymm8, 128(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm2, 160(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm4, 64(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm3, 96(%rax) -; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rax) -; AVX2-SLOW-NEXT: addq $24, %rsp +; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm0 +; AVX2-SLOW-NEXT: vmovdqa (%r9), %ymm1 +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,u,17,u,16,u,19,u,u,u,u,u,20,u,u] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm2 +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255] +; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm4 +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,u,29,u,28,u,27,u,30,u,u,u,u,u,31] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3] +; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0] +; AVX2-SLOW-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm1 +; AVX2-SLOW-NEXT: vmovdqa (%r9), %xmm7 +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1] +; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm0, %ymm8, %ymm3 +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1] +; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm8, %ymm5 +; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15] +; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1] +; AVX2-SLOW-NEXT: vpblendvb %ymm6, %ymm0, %ymm7, %ymm0 +; AVX2-SLOW-NEXT: vmovdqa %ymm1, 160(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm4, 128(%rax) +; 
AVX2-SLOW-NEXT: vmovdqa %ymm2, 96(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm0, 64(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm5, 32(%rax) +; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%rax) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: store_i8_stride6_vf32: ; AVX2-FAST: # %bb.0: -; AVX2-FAST-NEXT: subq $72, %rsp -; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm11 -; AVX2-FAST-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm3 -; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm8 -; AVX2-FAST-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-NEXT: vmovdqa (%r9), %ymm6 -; AVX2-FAST-NEXT: vmovdqa (%rcx), %xmm2 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u> -; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm1 -; AVX2-FAST-NEXT: vmovdqa %xmm2, %xmm7 -; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm15 -; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm15, %xmm0 -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1] -; AVX2-FAST-NEXT: vmovdqa (%r9), %xmm14 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u> -; AVX2-FAST-NEXT: vpshufb %xmm1, %xmm14, %xmm2 -; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm4 -; AVX2-FAST-NEXT: vpshufb %xmm1, %xmm4, %xmm1 -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1] -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255> -; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0 -; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm0 -; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm1 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u> -; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm3, %ymm12 -; AVX2-FAST-NEXT: vmovdqa %ymm3, %ymm10 -; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm11, %ymm2 -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm12[0],ymm2[1],ymm12[1],ymm2[2],ymm12[2],ymm2[3],ymm12[3],ymm2[4],ymm12[4],ymm2[5],ymm12[5],ymm2[6],ymm12[6],ymm2[7],ymm12[7],ymm2[16],ymm12[16],ymm2[17],ymm12[17],ymm2[18],ymm12[18],ymm2[19],ymm12[19],ymm2[20],ymm12[20],ymm2[21],ymm12[21],ymm2[22],ymm12[22],ymm2[23],ymm12[23] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm12 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u> -; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm6, %ymm13 -; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm8, %ymm12 -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} ymm12 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[16],ymm13[16],ymm12[17],ymm13[17],ymm12[18],ymm13[18],ymm12[19],ymm13[19],ymm12[20],ymm13[20],ymm12[21],ymm13[21],ymm12[22],ymm13[22],ymm12[23],ymm13[23] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm2, %ymm12, %ymm2 -; AVX2-FAST-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill -; AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] -; 
AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1] -; AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1] -; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm2, %ymm3, %ymm2 -; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1] -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3],xmm15[4],xmm7[4],xmm15[5],xmm7[5],xmm15[6],xmm7[6],xmm15[7],xmm7[7] -; AVX2-FAST-NEXT: vmovdqa %xmm7, %xmm11 -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[2,3,0,1,6,7,4,5,8,9,8,9,8,9,8,9] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1] -; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm2, %ymm3, %ymm2 -; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill -; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm2 -; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm3 -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} ymm12 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3] -; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm7[0],ymm10[0],ymm7[1],ymm10[1],ymm7[2],ymm10[2],ymm7[3],ymm10[3],ymm7[4],ymm10[4],ymm7[5],ymm10[5],ymm7[6],ymm10[6],ymm7[7],ymm10[7],ymm7[16],ymm10[16],ymm7[17],ymm10[17],ymm7[18],ymm10[18],ymm7[19],ymm10[19],ymm7[20],ymm10[20],ymm7[21],ymm10[21],ymm7[22],ymm10[22],ymm7[23],ymm10[23] -; AVX2-FAST-NEXT: vmovdqa %ymm10, %ymm8 -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,16,17,22,23,20,21,24,25,24,25,24,25,24,25] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm12, %ymm13, %ymm12 -; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload -; AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} ymm13 = ymm5[8],ymm6[8],ymm5[9],ymm6[9],ymm5[10],ymm6[10],ymm5[11],ymm6[11],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15],ymm5[24],ymm6[24],ymm5[25],ymm6[25],ymm5[26],ymm6[26],ymm5[27],ymm6[27],ymm5[28],ymm6[28],ymm5[29],ymm6[29],ymm5[30],ymm6[30],ymm5[31],ymm6[31] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3] -; AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} ymm10 = 
ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm13, %ymm10, %ymm9 -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = -; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm0, %xmm0 -; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm1, %xmm1 -; AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1] -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255] -; AVX2-FAST-NEXT: vpblendvb %ymm1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload -; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm10 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u> -; AVX2-FAST-NEXT: vpshufb %ymm10, %ymm3, %ymm3 -; AVX2-FAST-NEXT: vpshufb %ymm10, %ymm2, %ymm2 -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm1, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload -; AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm15[8],xmm11[8],xmm15[9],xmm11[9],xmm15[10],xmm11[10],xmm15[11],xmm11[11],xmm15[12],xmm11[12],xmm15[13],xmm11[13],xmm15[14],xmm11[14],xmm15[15],xmm11[15] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1] -; AVX2-FAST-NEXT: vpblendvb %ymm1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[4,5,2,3,0,1,6,7,8,9,8,9,8,9,8,9] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1] -; AVX2-FAST-NEXT: vpblendvb %ymm1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload -; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,16,17,22,23,24,25,24,25,24,25,24,25] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm1, %ymm12, %ymm5, %ymm5 -; AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} ymm6 = 
ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31] -; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31] -; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3] -; AVX2-FAST-NEXT: vpblendvb %ymm1, %ymm9, %ymm6, %ymm1 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX2-FAST-NEXT: vmovdqa %ymm2, 128(%rax) +; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm0 +; AVX2-FAST-NEXT: vmovdqa (%r9), %ymm1 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,u,17,u,16,u,19,u,u,u,u,u,20,u,u] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm2 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255] +; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm4 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,u,29,u,28,u,27,u,30,u,u,u,u,u,31] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3] +; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0] +; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm1 +; AVX2-FAST-NEXT: vmovdqa (%r9), %xmm7 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1] +; AVX2-FAST-NEXT: vpblendvb %ymm3, %ymm0, %ymm8, %ymm3 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1] +; AVX2-FAST-NEXT: vpblendvb %ymm5, %ymm0, %ymm8, %ymm5 +; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15] +; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1] +; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm0, %ymm7, %ymm0 ; AVX2-FAST-NEXT: vmovdqa %ymm1, 160(%rax) -; AVX2-FAST-NEXT: vmovdqa %ymm5, 96(%rax) -; AVX2-FAST-NEXT: vmovdqa %ymm4, (%rax) -; AVX2-FAST-NEXT: vmovdqa %ymm3, 64(%rax) -; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rax) -; AVX2-FAST-NEXT: addq $72, %rsp +; AVX2-FAST-NEXT: vmovdqa %ymm4, 128(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm2, 96(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm0, 64(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm5, 32(%rax) +; AVX2-FAST-NEXT: vmovdqa %ymm3, (%rax) ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX512-LABEL: store_i8_stride6_vf32: ; AVX512: # %bb.0: ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: vmovdqa (%rdi), %ymm8 -; AVX512-NEXT: vmovdqa (%rsi), %ymm9 -; AVX512-NEXT: vmovdqa (%rdx), %ymm10 -; AVX512-NEXT: vmovdqa (%rcx), %ymm11 -; AVX512-NEXT: vmovdqa (%r8), %ymm12 -; AVX512-NEXT: vmovdqa (%r9), %ymm13 -; AVX512-NEXT: vmovdqa (%rsi), %xmm6 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = -; AVX512-NEXT: vpshufb %xmm7, %xmm6, %xmm0 -; AVX512-NEXT: vmovdqa (%rdi), %xmm1 -; AVX512-NEXT: vpshufb %xmm7, %xmm1, %xmm7 -; 
AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15] -; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1] -; AVX512-NEXT: vmovdqa (%rcx), %xmm14 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u> -; AVX512-NEXT: vpshufb %xmm2, %xmm14, %xmm3 -; AVX512-NEXT: vmovdqa (%rdx), %xmm4 -; AVX512-NEXT: vpshufb %xmm2, %xmm4, %xmm2 -; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1] -; AVX512-NEXT: movw $18724, %cx # imm = 0x4924 +; AVX512-NEXT: vmovdqa (%rdx), %ymm0 +; AVX512-NEXT: vmovdqa (%rcx), %ymm1 +; AVX512-NEXT: vmovdqa (%r8), %ymm2 +; AVX512-NEXT: vmovdqa (%r9), %ymm3 +; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31] +; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15] +; AVX512-NEXT: movw $-28087, %cx # imm = 0x9249 ; AVX512-NEXT: kmovd %ecx, %k1 -; AVX512-NEXT: vmovdqu16 %ymm0, %ymm2 {%k1} -; AVX512-NEXT: vmovdqa (%r9), %xmm0 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u> -; AVX512-NEXT: vpshufb %xmm3, %xmm0, %xmm5 -; AVX512-NEXT: vmovdqa (%r8), %xmm7 -; AVX512-NEXT: vpshufb %xmm3, %xmm7, %xmm3 -; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] -; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1] +; AVX512-NEXT: vmovdqa %ymm1, %ymm6 +; AVX512-NEXT: vpermw %ymm4, %ymm5, %ymm6 {%k1} +; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u> +; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm5 +; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm4 +; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23] +; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] ; AVX512-NEXT: movw $9362, %cx # imm = 0x2492 ; AVX512-NEXT: kmovd %ecx, %k2 -; AVX512-NEXT: vmovdqu16 %ymm3, %ymm2 {%k2} -; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5] -; AVX512-NEXT: vpermw %ymm3, %ymm5, %ymm3 -; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7] -; AVX512-NEXT: vprold $16, %xmm5, %xmm5 -; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1] -; AVX512-NEXT: vmovdqu16 %ymm5, %ymm3 {%k2} -; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm15 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4] -; AVX512-NEXT: vpermw %ymm5, 
%ymm15, %ymm3 {%k1} -; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2 -; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[16],ymm13[16],ymm12[17],ymm13[17],ymm12[18],ymm13[18],ymm12[19],ymm13[19],ymm12[20],ymm13[20],ymm12[21],ymm13[21],ymm12[22],ymm13[22],ymm12[23],ymm13[23] -; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[4],ymm9[4],ymm8[5],ymm9[5],ymm8[6],ymm9[6],ymm8[7],ymm9[7],ymm8[16],ymm9[16],ymm8[17],ymm9[17],ymm8[18],ymm9[18],ymm8[19],ymm9[19],ymm8[20],ymm9[20],ymm8[21],ymm9[21],ymm8[22],ymm9[22],ymm8[23],ymm9[23] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm15 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13] -; AVX512-NEXT: vpermw %ymm5, %ymm15, %ymm5 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm15 = [10,9,8,11,10,9,8,11,10,9,8,11,12,12,12,12] -; AVX512-NEXT: vpermw %ymm3, %ymm15, %ymm5 {%k1} -; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[16],ymm11[16],ymm10[17],ymm11[17],ymm10[18],ymm11[18],ymm10[19],ymm11[19],ymm10[20],ymm11[20],ymm10[21],ymm11[21],ymm10[22],ymm11[22],ymm10[23],ymm11[23] -; AVX512-NEXT: vprold $16, %ymm3, %ymm3 -; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3] -; AVX512-NEXT: vmovdqu16 %ymm3, %ymm5 {%k2} -; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15] -; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15] +; AVX512-NEXT: vpblendmw %ymm4, %ymm0, %ymm4 {%k2} +; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm8 +; AVX512-NEXT: vmovdqa (%r9), %xmm5 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u> +; AVX512-NEXT: vpshufb %xmm6, %xmm5, %xmm7 +; AVX512-NEXT: vmovdqa (%r8), %xmm4 +; AVX512-NEXT: vpshufb %xmm6, %xmm4, %xmm6 +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] +; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1] +; AVX512-NEXT: vpblendmw %ymm6, %ymm1, %ymm6 {%k2} +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] +; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4] +; AVX512-NEXT: movw $18724, %cx # imm = 0x4924 +; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: vmovdqa %ymm0, %ymm10 +; AVX512-NEXT: vpermw %ymm7, %ymm9, %ymm10 {%k2} +; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm10, %zmm6 +; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23] +; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [10,9,8,11,10,9,8,11,10,9,8,11,12,12,12,12] +; AVX512-NEXT: vpermw %ymm2, %ymm3, %ymm1 {%k2} +; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = 
xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15] ; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7] -; AVX512-NEXT: vpermw %ymm0, %ymm3, %ymm0 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7] -; AVX512-NEXT: vpermw %ymm1, %ymm3, %ymm0 {%k2} -; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7] -; AVX512-NEXT: vpermw %ymm1, %ymm3, %ymm0 {%k1} -; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm0 -; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15],ymm10[24],ymm11[24],ymm10[25],ymm11[25],ymm10[26],ymm11[26],ymm10[27],ymm11[27],ymm10[28],ymm11[28],ymm10[29],ymm11[29],ymm10[30],ymm11[30],ymm10[31],ymm11[31] -; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm12[8],ymm13[8],ymm12[9],ymm13[9],ymm12[10],ymm13[10],ymm12[11],ymm13[11],ymm12[12],ymm13[12],ymm12[13],ymm13[13],ymm12[14],ymm13[14],ymm12[15],ymm13[15],ymm12[24],ymm13[24],ymm12[25],ymm13[25],ymm12[26],ymm13[26],ymm12[27],ymm13[27],ymm12[28],ymm13[28],ymm12[29],ymm13[29],ymm12[30],ymm13[30],ymm12[31],ymm13[31] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15] -; AVX512-NEXT: vpermw %ymm3, %ymm4, %ymm3 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15] -; AVX512-NEXT: vpermw %ymm1, %ymm4, %ymm3 {%k1} -; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm8[8],ymm9[8],ymm8[9],ymm9[9],ymm8[10],ymm9[10],ymm8[11],ymm9[11],ymm8[12],ymm9[12],ymm8[13],ymm9[13],ymm8[14],ymm9[14],ymm8[15],ymm9[15],ymm8[24],ymm9[24],ymm8[25],ymm9[25],ymm8[26],ymm9[26],ymm8[27],ymm9[27],ymm8[28],ymm9[28],ymm8[29],ymm9[29],ymm8[30],ymm9[30],ymm8[31],ymm9[31] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15] -; AVX512-NEXT: vpermw %ymm1, %ymm4, %ymm3 {%k2} -; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u> -; AVX512-NEXT: vpshufb %ymm1, %ymm13, %ymm4 -; AVX512-NEXT: vpshufb %ymm1, %ymm12, %ymm1 -; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[1],ymm4[1],ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[4],ymm4[4],ymm1[5],ymm4[5],ymm1[6],ymm4[6],ymm1[7],ymm4[7],ymm1[16],ymm4[16],ymm1[17],ymm4[17],ymm1[18],ymm4[18],ymm1[19],ymm4[19],ymm1[20],ymm4[20],ymm1[21],ymm4[21],ymm1[22],ymm4[22],ymm1[23],ymm4[23] -; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u> -; AVX512-NEXT: vpshufb %ymm4, %ymm11, %ymm5 -; AVX512-NEXT: vpshufb %ymm4, %ymm10, %ymm4 -; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23] -; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3] -; AVX512-NEXT: vmovdqu16 %ymm1, %ymm4 {%k2} -; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u> -; AVX512-NEXT: vpshufb %ymm1, %ymm9, %ymm5 -; AVX512-NEXT: vpshufb 
%ymm1, %ymm8, %ymm1 -; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[1],ymm5[1],ymm1[2],ymm5[2],ymm1[3],ymm5[3],ymm1[4],ymm5[4],ymm1[5],ymm5[5],ymm1[6],ymm5[6],ymm1[7],ymm5[7],ymm1[16],ymm5[16],ymm1[17],ymm5[17],ymm1[18],ymm5[18],ymm1[19],ymm5[19],ymm1[20],ymm5[20],ymm1[21],ymm5[21],ymm1[22],ymm5[22],ymm1[23],ymm5[23] -; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3] -; AVX512-NEXT: vmovdqu16 %ymm1, %ymm4 {%k1} -; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm1 -; AVX512-NEXT: vmovdqu64 %zmm1, 128(%rax) +; AVX512-NEXT: vpermw %ymm2, %ymm3, %ymm0 {%k1} +; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX512-NEXT: vmovdqu64 %zmm0, 64(%rax) -; AVX512-NEXT: vmovdqu64 %zmm2, (%rax) +; AVX512-NEXT: vmovdqu64 %zmm6, (%rax) +; AVX512-NEXT: vmovdqu64 %zmm8, 128(%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %in.vec0 = load <32 x i8>, <32 x i8>* %in.vecptr0, align 32 diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll --- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll @@ -3162,106 +3162,63 @@ define void @PR45604(<32 x i16>* %dst, <8 x i16>* %src) { ; SSE2-LABEL: PR45604: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa (%rsi), %xmm1 -; SSE2-NEXT: movd %xmm1, %eax -; SSE2-NEXT: movzwl %ax, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: movl $11, %eax -; SSE2-NEXT: pinsrw $2, %eax, %xmm0 -; SSE2-NEXT: pextrw $1, %xmm1, %ecx -; SSE2-NEXT: pinsrw $4, %ecx, %xmm0 -; SSE2-NEXT: pinsrw $6, %eax, %xmm0 -; SSE2-NEXT: pextrw $2, %xmm1, %ecx -; SSE2-NEXT: movd %ecx, %xmm2 -; SSE2-NEXT: pinsrw $2, %eax, %xmm2 -; SSE2-NEXT: pextrw $3, %xmm1, %ecx -; SSE2-NEXT: pinsrw $4, %ecx, %xmm2 -; SSE2-NEXT: pinsrw $6, %eax, %xmm2 -; SSE2-NEXT: pextrw $4, %xmm1, %ecx -; SSE2-NEXT: movd %ecx, %xmm3 -; SSE2-NEXT: pinsrw $2, %eax, %xmm3 -; SSE2-NEXT: pextrw $5, %xmm1, %ecx -; SSE2-NEXT: pinsrw $4, %ecx, %xmm3 -; SSE2-NEXT: pinsrw $6, %eax, %xmm3 -; SSE2-NEXT: pextrw $6, %xmm1, %ecx -; SSE2-NEXT: movd %ecx, %xmm4 -; SSE2-NEXT: pinsrw $2, %eax, %xmm4 -; SSE2-NEXT: pextrw $7, %xmm1, %ecx -; SSE2-NEXT: pinsrw $4, %ecx, %xmm4 -; SSE2-NEXT: pinsrw $6, %eax, %xmm4 -; SSE2-NEXT: movdqa %xmm4, 48(%rdi) -; SSE2-NEXT: movdqa %xmm3, 32(%rdi) -; SSE2-NEXT: movdqa %xmm2, 16(%rdi) -; SSE2-NEXT: movdqa %xmm0, (%rdi) +; SSE2-NEXT: movdqa (%rsi), %xmm0 +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3] +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [11,11,0,11] +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSE2-NEXT: movdqa %xmm0, 32(%rdi) +; SSE2-NEXT: movdqa %xmm2, (%rdi) +; SSE2-NEXT: movdqa %xmm1, 48(%rdi) +; SSE2-NEXT: movdqa %xmm3, 16(%rdi) ; SSE2-NEXT: retq ; ; SSSE3-LABEL: PR45604: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa (%rsi), %xmm1 -; SSSE3-NEXT: movd %xmm1, %eax -; SSSE3-NEXT: movzwl %ax, %eax -; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: movl $11, %eax -; SSSE3-NEXT: pinsrw $2, %eax, %xmm0 -; SSSE3-NEXT: pextrw $1, %xmm1, %ecx -; 
SSSE3-NEXT: pinsrw $4, %ecx, %xmm0 -; SSSE3-NEXT: pinsrw $6, %eax, %xmm0 -; SSSE3-NEXT: pextrw $2, %xmm1, %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: pinsrw $2, %eax, %xmm2 -; SSSE3-NEXT: pextrw $3, %xmm1, %ecx -; SSSE3-NEXT: pinsrw $4, %ecx, %xmm2 -; SSSE3-NEXT: pinsrw $6, %eax, %xmm2 -; SSSE3-NEXT: pextrw $4, %xmm1, %ecx -; SSSE3-NEXT: movd %ecx, %xmm3 -; SSSE3-NEXT: pinsrw $2, %eax, %xmm3 -; SSSE3-NEXT: pextrw $5, %xmm1, %ecx -; SSSE3-NEXT: pinsrw $4, %ecx, %xmm3 -; SSSE3-NEXT: pinsrw $6, %eax, %xmm3 -; SSSE3-NEXT: pextrw $6, %xmm1, %ecx -; SSSE3-NEXT: movd %ecx, %xmm4 -; SSSE3-NEXT: pinsrw $2, %eax, %xmm4 -; SSSE3-NEXT: pextrw $7, %xmm1, %ecx -; SSSE3-NEXT: pinsrw $4, %ecx, %xmm4 -; SSSE3-NEXT: pinsrw $6, %eax, %xmm4 -; SSSE3-NEXT: movdqa %xmm4, 48(%rdi) -; SSSE3-NEXT: movdqa %xmm3, 32(%rdi) -; SSSE3-NEXT: movdqa %xmm2, 16(%rdi) -; SSSE3-NEXT: movdqa %xmm0, (%rdi) +; SSSE3-NEXT: movdqa (%rsi), %xmm0 +; SSSE3-NEXT: pxor %xmm1, %xmm1 +; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3] +; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [11,11,0,11] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSSE3-NEXT: movdqa %xmm0, 32(%rdi) +; SSSE3-NEXT: movdqa %xmm2, (%rdi) +; SSSE3-NEXT: movdqa %xmm1, 48(%rdi) +; SSSE3-NEXT: movdqa %xmm3, 16(%rdi) ; SSSE3-NEXT: retq ; ; SSE41-LABEL: PR45604: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa (%rsi), %xmm1 -; SSE41-NEXT: pextrw $2, %xmm1, %eax -; SSE41-NEXT: movd %eax, %xmm0 -; SSE41-NEXT: movl $11, %eax -; SSE41-NEXT: pinsrw $2, %eax, %xmm0 -; SSE41-NEXT: pextrw $3, %xmm1, %ecx -; SSE41-NEXT: pinsrw $4, %ecx, %xmm0 -; SSE41-NEXT: pinsrw $6, %eax, %xmm0 -; SSE41-NEXT: pextrw $4, %xmm1, %ecx -; SSE41-NEXT: movd %ecx, %xmm2 -; SSE41-NEXT: pinsrw $2, %eax, %xmm2 -; SSE41-NEXT: pextrw $5, %xmm1, %ecx -; SSE41-NEXT: pinsrw $4, %ecx, %xmm2 -; SSE41-NEXT: pinsrw $6, %eax, %xmm2 -; SSE41-NEXT: pextrw $6, %xmm1, %ecx -; SSE41-NEXT: movd %ecx, %xmm3 -; SSE41-NEXT: pinsrw $2, %eax, %xmm3 -; SSE41-NEXT: pextrw $7, %xmm1, %ecx -; SSE41-NEXT: pinsrw $4, %ecx, %xmm3 -; SSE41-NEXT: pinsrw $6, %eax, %xmm3 -; SSE41-NEXT: pxor %xmm4, %xmm4 -; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm1[0],xmm4[1,2,3,4,5,6,7] -; SSE41-NEXT: pinsrw $2, %eax, %xmm4 -; SSE41-NEXT: pextrw $1, %xmm1, %ecx -; SSE41-NEXT: pinsrw $4, %ecx, %xmm4 -; SSE41-NEXT: pinsrw $6, %eax, %xmm4 -; SSE41-NEXT: movdqa %xmm4, (%rdi) -; SSE41-NEXT: movdqa %xmm3, 48(%rdi) -; SSE41-NEXT: movdqa %xmm2, 32(%rdi) -; SSE41-NEXT: movdqa %xmm0, 16(%rdi) +; SSE41-NEXT: movdqa (%rsi), %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [11,11,11,0,11,11,11,0] +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] +; SSE41-NEXT: pmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; SSE41-NEXT: pshufd 
{{.*#+}} xmm4 = xmm0[3,3,3,3] +; SSE41-NEXT: pmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3],xmm4[4,5],xmm2[6,7] +; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: movdqa %xmm0, (%rdi) +; SSE41-NEXT: movdqa %xmm4, 48(%rdi) +; SSE41-NEXT: movdqa %xmm3, 32(%rdi) +; SSE41-NEXT: movdqa %xmm1, 16(%rdi) ; SSE41-NEXT: retq ; ; AVX1-LABEL: PR45604: