Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -51,10 +51,6 @@ namespace { static cl::opt<bool> - CombinerAA("combiner-alias-analysis", cl::Hidden, - cl::desc("Enable DAG combiner alias-analysis heuristics")); - - static cl::opt<bool> CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden, cl::desc("Enable DAG combiner's use of IR alias analysis")); @@ -397,15 +393,12 @@ /// Holds a pointer to an LSBaseSDNode as well as information on where it /// is located in a sequence of memory operations connected by a chain. struct MemOpLink { - MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq): - MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { } + MemOpLink(LSBaseSDNode *N, int64_t Offset) + : MemNode(N), OffsetFromBase(Offset) {} // Ptr to the mem node. LSBaseSDNode *MemNode; // Offset from the base ptr. int64_t OffsetFromBase; - // What is the sequence number of this mem node. - // Lowest mem operand in the DAG starts at zero. - unsigned SequenceNum; }; /// This is a helper function for visitMUL to check the profitability @@ -421,7 +414,6 @@ SDValue getMergedConstantVectorStore(SelectionDAG &DAG, SDLoc SL, ArrayRef<MemOpLink> Stores, - SmallVectorImpl<SDValue> &Chains, EVT Ty) const; /// This is a helper function for visitAND and visitZERO_EXTEND. Returns @@ -443,10 +435,9 @@ /// This is a helper function for MergeConsecutiveStores. /// Stores that may be merged are placed in StoreNodes. - /// Loads that may alias with those stores are placed in AliasLoadNodes. - void getStoreMergeAndAliasCandidates( - StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes, - SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes); + void + getStoreMergeAndAliasCandidates(StoreSDNode *St, + SmallVectorImpl<MemOpLink> &StoreNodes); /// Helper function for MergeConsecutiveStores. Checks if /// Candidate stores have indirect dependency through their @@ -1601,11 +1592,9 @@ Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops); } - // Add users to worklist if AA is enabled, since it may introduce - // a lot of new chained token factors while removing memory deps. - bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA - : DAG.getSubtarget().useAA(); - return CombineTo(N, Result, UseAA /*add to worklist*/); + // Add users to worklist, since we may introduce a lot of new + // chained token factors while removing memory deps. + return CombineTo(N, Result, true /*add to worklist*/); } return Result; @@ -9999,11 +9988,22 @@ // TODO: Handle store large -> read small portion. // TODO: Handle TRUNCSTORE/LOADEXT if (ISD::isNormalLoad(N) && !LD->isVolatile()) { - if (ISD::isNON_TRUNCStore(Chain.getNode())) { + // Either a direct store, or a store off of a TokenFactor can be + // forwarded. 
+ if (Chain->getOpcode() == ISD::TokenFactor) { + for (const SDValue &ChainOp : Chain->op_values()) { + if (ISD::isNON_TRUNCStore(ChainOp.getNode())) { + StoreSDNode *PrevST = cast<StoreSDNode>(ChainOp); + if (PrevST->getBasePtr() == Ptr && + PrevST->getValue().getValueType() == N->getValueType(0)) + return CombineTo(N, PrevST->getOperand(1), Chain); + } + } + } else if (ISD::isNON_TRUNCStore(Chain.getNode())) { StoreSDNode *PrevST = cast<StoreSDNode>(Chain); if (PrevST->getBasePtr() == Ptr && PrevST->getValue().getValueType() == N->getValueType(0)) - return CombineTo(N, Chain.getOperand(1), Chain); + return CombineTo(N, PrevST->getOperand(1), Chain); } } @@ -10024,14 +10024,7 @@ } } - bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA - : DAG.getSubtarget().useAA(); -#ifndef NDEBUG - if (CombinerAAOnlyFunc.getNumOccurrences() && - CombinerAAOnlyFunc != DAG.getMachineFunction().getName()) - UseAA = false; -#endif - if (UseAA && LD->isUnindexed()) { + if (LD->isUnindexed()) { // Walk up chain skipping non-aliasing memory nodes. SDValue BetterChain = FindBetterChain(N, Chain); @@ -11117,13 +11110,11 @@ SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG, SDLoc SL, ArrayRef<MemOpLink> Stores, - SmallVectorImpl<SDValue> &Chains, EVT Ty) const { SmallVector<SDValue, 8> BuildVector; for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I) { StoreSDNode *St = cast<StoreSDNode>(Stores[I].MemNode); - Chains.push_back(St->getChain()); BuildVector.push_back(St->getValue()); } @@ -11139,21 +11130,10 @@ int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8; LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; - unsigned LatestNodeUsed = 0; - - for (unsigned i=0; i < NumStores; ++i) { - // Find a chain for the new wide-store operand. Notice that some - // of the store nodes that we found may not be selected for inclusion - // in the wide store. The chain we use needs to be the chain of the - // latest store node which is *used* and replaced by the wide store. - if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum) - LatestNodeUsed = i; - } SmallVector<SDValue, 8> Chains; // The latest Node in the DAG. - LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode; SDLoc DL(StoreNodes[0].MemNode); SDValue StoredVal; @@ -11169,7 +11149,7 @@ assert(TLI.isTypeLegal(Ty) && "Illegal vector store"); if (IsConstantSrc) { - StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Chains, Ty); + StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Ty); } else { SmallVector<SDValue, 8> Ops; for (unsigned i = 0; i < NumStores; ++i) { @@ -11179,7 +11159,6 @@ if (Val.getValueType() != MemVT) return false; Ops.push_back(Val); - Chains.push_back(St->getChain()); } // Build the extracted vector elements back into a vector. @@ -11199,7 +11178,6 @@ for (unsigned i = 0; i < NumStores; ++i) { unsigned Idx = IsLE ? 
(NumStores - 1 - i) : i; StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode); - Chains.push_back(St->getChain()); SDValue Val = St->getValue(); StoreInt <<= ElementSizeBytes * 8; @@ -11217,7 +11195,9 @@ StoredVal = DAG.getConstant(StoreInt, DL, StoreTy); } - assert(!Chains.empty()); + // Gather all of the chains we're inheriting from the merged stores. + for (unsigned i = 0; i < NumStores; ++i) + Chains.push_back(StoreNodes[i].MemNode->getChain()); SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); SDValue NewStore = DAG.getStore(NewChain, DL, StoredVal, @@ -11226,45 +11206,19 @@ false, false, FirstInChain->getAlignment()); - bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA - : DAG.getSubtarget().useAA(); - if (UseAA) { - // Replace all merged stores with the new store. - for (unsigned i = 0; i < NumStores; ++i) - CombineTo(StoreNodes[i].MemNode, NewStore); - } else { - // Replace the last store with the new store. - CombineTo(LatestOp, NewStore); - // Erase all other stores. - for (unsigned i = 0; i < NumStores; ++i) { - if (StoreNodes[i].MemNode == LatestOp) - continue; - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); - // ReplaceAllUsesWith will replace all uses that existed when it was - // called, but graph optimizations may cause new ones to appear. For - // example, the case in pr14333 looks like - // - // St's chain -> St -> another store -> X - // - // And the only difference from St to the other store is the chain. - // When we change it's chain to be St's chain they become identical, - // get CSEed and the net result is that X is now a use of St. - // Since we know that St is redundant, just iterate. - while (!St->use_empty()) - DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain()); - deleteAndRecombine(St); - } - } + // Replace all merged stores with the new store. + for (unsigned i = 0; i < NumStores; ++i) + CombineTo(StoreNodes[i].MemNode, NewStore); return true; } void DAGCombiner::getStoreMergeAndAliasCandidates( - StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes, - SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) { + StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes) { // This holds the base pointer, index, and the offset in bytes from the base // pointer. BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG); + EVT MemVT = St->getMemoryVT(); // We must have a base and an offset. if (!BasePtr.Base.getNode()) @@ -11274,104 +11228,38 @@ if (BasePtr.Base.isUndef()) return; - // Walk up the chain and look for nodes with offsets from the same - // base pointer. Stop when reaching an instruction with a different kind - // or instruction which has a different base pointer. - EVT MemVT = St->getMemoryVT(); - unsigned Seq = 0; - StoreSDNode *Index = St; - - - bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA - : DAG.getSubtarget().useAA(); + // We are looking for a root node which is an ancestor to all mergeable + // stores. We search up through a load, to our root, and then down + // through all children. FIXME: We should be able to climb and + // descend TokenFactors to find candidates as well. - if (UseAA) { - // Look at other users of the same chain. Stores on the same chain do not - // alias. If combiner-aa is enabled, non-aliasing stores are canonicalized - // to be on the same chain, so don't bother looking at adjacent chains. 
+ SDNode *RootNode = (St->getChain()).getNode(); - SDValue Chain = St->getChain(); - for (auto I = Chain->use_begin(), E = Chain->use_end(); I != E; ++I) { - if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) { - if (I.getOperandNo() != 0) - continue; - - if (OtherST->isVolatile() || OtherST->isIndexed()) - continue; + // The set of parents of candidate stores. + std::set<SDNode *> CandidateParents; - if (OtherST->getMemoryVT() != MemVT) - continue; - - BaseIndexOffset Ptr = BaseIndexOffset::match(OtherST->getBasePtr(), DAG); - - if (Ptr.equalBaseIndex(BasePtr)) - StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset, Seq++)); - } - } - - return; - } - - while (Index) { - // If the chain has more than one use, then we can't reorder the mem ops. - if (Index != St && !SDValue(Index, 0)->hasOneUse()) - break; - - // Find the base pointer and offset for this memory node. - BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG); - - // Check that the base pointer is the same as the original one. - if (!Ptr.equalBaseIndex(BasePtr)) - break; - - // The memory operands must not be volatile. - if (Index->isVolatile() || Index->isIndexed()) - break; - - // No truncation. - if (Index->isTruncatingStore()) - break; - - // The stored memory type must be the same. - if (Index->getMemoryVT() != MemVT) - break; - - // We do not allow under-aligned stores in order to prevent - // overriding stores. NOTE: this is a bad hack. Alignment SHOULD - // be irrelevant here; what MATTERS is that we not move memory - // operations that potentially overlap past each-other. - if (Index->getAlignment() < MemVT.getStoreSize()) - break; - - // We found a potential memory operand to merge. - StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++)); - - // Find the next memory operand in the chain. If the next operand in the - // chain is a store then move up and continue the scan with the next - // memory operand. If the next operand is a load save it and use alias - // information to check if it interferes with anything. - SDNode *NextInChain = Index->getChain().getNode(); - while (1) { - if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) { - // We found a store node. Use it for the next iteration. - Index = STn; - break; - } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) { - if (Ldn->isVolatile()) { - Index = nullptr; - break + if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(RootNode)) { + RootNode = Ldn->getChain().getNode(); + for (auto I = RootNode->use_begin(), E = RootNode->use_end(); I != E; ++I) + if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) // walk down chain + CandidateParents.insert(*I); + } else + CandidateParents.insert(RootNode); + + // Check all parents of mergeable children. + for (auto P = CandidateParents.begin(); P != CandidateParents.end(); ++P) + for (auto I = (*P)->use_begin(), E = (*P)->use_end(); I != E; ++I) + if (I.getOperandNo() == 0) + if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) { + if (OtherST->isVolatile() || OtherST->isIndexed()) + continue; + if (OtherST->getMemoryVT() != MemVT) + continue; + BaseIndexOffset Ptr = + BaseIndexOffset::match(OtherST->getBasePtr(), DAG); + if (Ptr.equalBaseIndex(BasePtr)) + StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset)); } - - // Save the load node for later. Continue the scan. 
- AliasLoadNodes.push_back(Ldn); - NextInChain = Ldn->getChain().getNode(); - continue; - } else { - Index = nullptr; - break; - } - } - } } // We need to check that merging these stores does not cause a loop @@ -11391,10 +11279,9 @@ Worklist.push_back(n->getOperand(j).getNode()); } // search through DAG. We can stop early if we find a storenode - for (unsigned i = 0; i < StoreNodes.size(); ++i) { + for (unsigned i = 0; i < StoreNodes.size(); ++i) if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist)) return false; - } return true; } @@ -11432,68 +11319,35 @@ if (MemVT.isVector() && IsLoadSrc) return false; - // Only look at ends of store sequences. - SDValue Chain = SDValue(St, 0); - if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE) - return false; - - // Save the LoadSDNodes that we find in the chain. - // We need to make sure that these nodes do not interfere with - // any of the store nodes. - SmallVector<LSBaseSDNode*, 8> AliasLoadNodes; - - // Save the StoreSDNodes that we find in the chain. + // Find potential store merge candidates by searching through the chain sub-DAG. SmallVector<MemOpLink, 8> StoreNodes; - - getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes); + getStoreMergeAndAliasCandidates(St, StoreNodes); // Check if there is anything to merge. if (StoreNodes.size() < 2) return false; - // only do dep endence check in AA case - bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA - : DAG.getSubtarget().useAA(); - if (UseAA && !checkMergeStoreCandidatesForDependencies(StoreNodes)) + // Check that we can merge these candidates without causing a cycle. + if (!checkMergeStoreCandidatesForDependencies(StoreNodes)) return false; // Sort the memory operands according to their distance from the - // base pointer. As a secondary criteria: make sure stores coming - // later in the code come first in the list. This is important for - // the non-UseAA case, because we're merging stores into the FINAL - // store along a chain which potentially contains aliasing stores. - // Thus, if there are multiple stores to the same address, the last - // one can be considered for merging but not the others. + // base pointer. std::sort(StoreNodes.begin(), StoreNodes.end(), [](MemOpLink LHS, MemOpLink RHS) { - return LHS.OffsetFromBase < RHS.OffsetFromBase || - (LHS.OffsetFromBase == RHS.OffsetFromBase && - LHS.SequenceNum < RHS.SequenceNum); - }); + return LHS.OffsetFromBase < RHS.OffsetFromBase; + }); // Scan the memory operations on the chain and find the first non-consecutive // store memory address. unsigned LastConsecutiveStore = 0; int64_t StartAddress = StoreNodes[0].OffsetFromBase; - for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) { - - // Check that the addresses are consecutive starting from the second - // element in the list of stores. - if (i > 0) { - int64_t CurrAddress = StoreNodes[i].OffsetFromBase; - if (CurrAddress - StartAddress != (ElementSizeBytes * i)) - break; - } - - // Check if this store interferes with any of the loads that we found. - // If we find a load that alias with this store. Stop the sequence. - if (std::any_of(AliasLoadNodes.begin(), AliasLoadNodes.end(), - [&](LSBaseSDNode* Ldn) { - return isAlias(Ldn, StoreNodes[i].MemNode); - })) + // Check that the addresses are consecutive starting from the second + // element in the list of stores. 
+ for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) { + int64_t CurrAddress = StoreNodes[i].OffsetFromBase; + if (CurrAddress - StartAddress != (ElementSizeBytes * i)) break; - - // Mark this node as useful. LastConsecutiveStore = i; } @@ -11647,7 +11501,7 @@ } // We found a potential memory operand to merge. - LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0)); + LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset)); } if (LoadNodes.size() < 2) @@ -11736,22 +11590,8 @@ // Collect the chains from all merged stores. SmallVector<SDValue, 8> MergeStoreChains; - MergeStoreChains.push_back(StoreNodes[0].MemNode->getChain()); - - // The latest Node in the DAG. - unsigned LatestNodeUsed = 0; - for (unsigned i=1; i<NumElem; ++i) { - // Find a chain for the new wide-store operand. Notice that some - // of the store nodes that we found may not be selected for inclusion - // in the wide store. The chain we use needs to be the chain of the - // latest store node which is *used* and replaced by the wide store. - if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum) - LatestNodeUsed = i; - + for (unsigned i = 0; i < NumElem; ++i) MergeStoreChains.push_back(StoreNodes[i].MemNode->getChain()); - } - - LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode; // Find if it is better to use vectors or integers to load and store // to memory. @@ -11786,23 +11626,9 @@ SDValue(NewLoad.getNode(), 1)); } - if (UseAA) { - // Replace the all stores with the new store. - for (unsigned i = 0; i < NumElem; ++i) - CombineTo(StoreNodes[i].MemNode, NewStore); - } else { - // Replace the last store with the new store. - CombineTo(LatestOp, NewStore); - // Erase all other stores. - for (unsigned i = 0; i < NumElem; ++i) { - // Remove all Store nodes. - if (StoreNodes[i].MemNode == LatestOp) - continue; - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); - DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain()); - deleteAndRecombine(St); - } - } + // Replace all of the stores with the new store. + for (unsigned i = 0; i < NumElem; ++i) + CombineTo(StoreNodes[i].MemNode, NewStore); return true; } @@ -11962,19 +11788,7 @@ if (SDValue NewST = TransformFPLoadStorePair(N)) return NewST; - bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA - : DAG.getSubtarget().useAA(); -#ifndef NDEBUG - if (CombinerAAOnlyFunc.getNumOccurrences() && - CombinerAAOnlyFunc != DAG.getMachineFunction().getName()) - UseAA = false; -#endif - if (UseAA && ST->isUnindexed()) { - // FIXME: We should do this even without AA enabled. AA will just allow - // FindBetterChain to work in more situations. The problem with this is that - // any combine that expects memory operations to be on consecutive chains - // first needs to be updated to look for users of the same chain. - + if (ST->isUnindexed()) { // Walk up chain skipping non-aliasing memory nodes, on this store and any // adjacent stores. if (findBetterNeighborChains(ST)) { @@ -12006,11 +11820,16 @@ // Otherwise, see if we can simplify the operation with // SimplifyDemandedBits, which only works if the value has a single use. 
- if (SimplifyDemandedBits(Value, - APInt::getLowBitsSet( - Value.getValueType().getScalarType().getSizeInBits(), - ST->getMemoryVT().getScalarType().getSizeInBits()))) + if (SimplifyDemandedBits( + Value, APInt::getLowBitsSet( + Value.getValueType().getScalarType().getSizeInBits(), + ST->getMemoryVT().getScalarType().getSizeInBits()))) { + // Re-visit the store if anything changed; SimplifyDemandedBits + // will add Value's node back to the worklist if necessary, but + // we also need to re-visit the Store node itself. + AddToWorklist(N); return SDValue(N, 0); + } } // If this is a load followed by a store to the same location, then the store @@ -14746,6 +14565,18 @@ return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases); } +// This function tries to collect a bunch of potentially interesting +// nodes to improve the chains of, all at once. This might seem +// redundant, as this function gets called when visiting every store +// node, so why not let the work be done on each store as it's visited? +// +// I believe this is mainly important because MergeConsecutiveStores +// is unable to deal with merging stores of different sizes, so unless +// we improve the chains of all the potential candidates up-front +// before running MergeConsecutiveStores, it might only see some of +// the nodes that will eventually be candidates, and then not be able +// to go from a partially-merged state to the desired final +// fully-merged state. bool DAGCombiner::findBetterNeighborChains(StoreSDNode* St) { // This holds the base pointer, index, and the offset in bytes from the base // pointer. @@ -14781,10 +14612,8 @@ if (!Ptr.equalBaseIndex(BasePtr)) break; - // Find the next memory operand in the chain. If the next operand in the - // chain is a store then move up and continue the scan with the next - // memory operand. If the next operand is a load save it and use alias - // information to check if it interferes with anything. + // Walk up the chain to find the next store node, ignoring any + // intermediate loads. Any other kind of node will halt the loop. SDNode *NextInChain = Index->getChain().getNode(); while (true) { if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) { @@ -14803,9 +14632,14 @@ Index = nullptr; break; } - } + } // end while } + // At this point, ChainedStores lists all of the Store nodes + // reachable by iterating up through chain nodes matching the above + // conditions. For each such store identified, try to find an + // earlier chain to attach the store to which won't violate the + // required ordering. bool MadeChange = false; SmallVector<std::pair<StoreSDNode *, SDValue>, 8> BetterChains; Index: lib/CodeGen/TargetLoweringBase.cpp =================================================================== --- lib/CodeGen/TargetLoweringBase.cpp +++ lib/CodeGen/TargetLoweringBase.cpp @@ -811,7 +811,7 @@ MinFunctionAlignment = 0; PrefFunctionAlignment = 0; PrefLoopAlignment = 0; - GatherAllAliasesMaxDepth = 6; + GatherAllAliasesMaxDepth = 18; MinStackArgumentAlignment = 1; MinimumJumpTableEntries = 4; // TODO: the default will be switched to 0 in the next commit, along Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -417,16 +417,6 @@ setFsqrtIsCheap(true); - // We want to find all load dependencies for long chains of stores to enable - // merging into very wide vectors. 
The problem is with vectors with > 4 - // elements. MergeConsecutiveStores will attempt to merge these because x8/x16 - // vectors are a legal type, even though we have to split the loads - // usually. When we can more precisely specify load legality per address - // space, we should be able to make FindBetterChain/MergeConsecutiveStores - // smarter so that they can figure out what to do in 2 iterations without all - // N > 4 stores on the same chain. - GatherAllAliasesMaxDepth = 16; - // FIXME: Need to really handle these. MaxStoresPerMemcpy = 4096; MaxStoresPerMemmove = 4096; Index: test/CodeGen/AArch64/argument-blocks.ll =================================================================== --- test/CodeGen/AArch64/argument-blocks.ll +++ test/CodeGen/AArch64/argument-blocks.ll @@ -62,7 +62,7 @@ ; but should go in an 8-byte aligned slot. define void @test_varargs_stackalign() { ; CHECK-LABEL: test_varargs_stackalign: -; CHECK-DARWINPCS: stp {{w[0-9]+}}, {{w[0-9]+}}, [sp, #16] +; CHECK-DARWINPCS: str {{x[0-9]+}}, [sp, #16] call void(...) @callee([3 x float] undef, [2 x float] [float 1.0, float 2.0]) ret void Index: test/CodeGen/AArch64/arm64-abi-varargs.ll =================================================================== --- test/CodeGen/AArch64/arm64-abi-varargs.ll +++ test/CodeGen/AArch64/arm64-abi-varargs.ll @@ -7,17 +7,13 @@ define void @fn9(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp { ; CHECK-LABEL: fn9: ; 9th fixed argument -; CHECK: ldr {{w[0-9]+}}, [sp, #64] -; CHECK: add [[ARGS:x[0-9]+]], sp, #72 -; CHECK: add {{x[0-9]+}}, [[ARGS]], #8 +; CHECK: add x[[ADDR:[0-9]+]], sp, #72 ; First vararg -; CHECK: ldr {{w[0-9]+}}, [sp, #72] -; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8 +; CHECK-DAG: ldr {{w[0-9]+}}, [sp, #72] ; Second vararg -; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}] -; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8 +; CHECK-DAG: ldr {{w[0-9]+}}, [x[[ADDR]]] ; Third vararg -; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}] +; CHECK-DAG: ldr {{w[0-9]+}}, [x[[ADDR]]], #8 %1 = alloca i32, align 4 %2 = alloca i32, align 4 %3 = alloca i32, align 4 Index: test/CodeGen/AArch64/arm64-abi.ll =================================================================== --- test/CodeGen/AArch64/arm64-abi.ll +++ test/CodeGen/AArch64/arm64-abi.ll @@ -205,10 +205,7 @@ define i32 @test8(i32 %argc, i8** nocapture %argv) nounwind { entry: ; CHECK-LABEL: test8 -; CHECK: strb {{w[0-9]+}}, [sp, #3] -; CHECK: strb wzr, [sp, #2] -; CHECK: strb {{w[0-9]+}}, [sp, #1] -; CHECK: strb wzr, [sp] +; CHECK: str w8, [sp, #-16]! ; CHECK: bl ; FAST-LABEL: test8 ; FAST: strb {{w[0-9]+}}, [sp] Index: test/CodeGen/AArch64/arm64-memset-inline.ll =================================================================== --- test/CodeGen/AArch64/arm64-memset-inline.ll +++ test/CodeGen/AArch64/arm64-memset-inline.ll @@ -9,11 +9,15 @@ ret void } +; FIXME: This shouldn't need to load in a zero value to store +; (e.g. 
stp xzr,xzr [sp, #16]) + define void @t2() nounwind ssp { entry: ; CHECK-LABEL: t2: +; CHECK: movi v0.2d, #0000000000000000 +; CHECK: stur q0, [sp, #16] ; CHECK: strh wzr, [sp, #32] -; CHECK: stp xzr, xzr, [sp, #16] ; CHECK: str xzr, [sp, #8] %buf = alloca [26 x i8], align 1 %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0 Index: test/CodeGen/AArch64/arm64-stur.ll =================================================================== --- test/CodeGen/AArch64/arm64-stur.ll +++ test/CodeGen/AArch64/arm64-stur.ll @@ -47,11 +47,14 @@ ret void } +;; FIXME: Again, with the writing of a quadword zero... + define void @foo(%struct.X* nocapture %p) nounwind optsize ssp { ; CHECK-LABEL: foo: ; CHECK-NOT: str -; CHECK: stur xzr, [x0, #12] -; CHECK-NEXT: stur xzr, [x0, #4] +; CHECK: stur q0, [x0, #4] +; CHECK-FIXME: stur xzr, [x0, #12] +; CHECK-FIXME-NEXT: stur xzr, [x0, #4] ; CHECK-NEXT: ret %B = getelementptr inbounds %struct.X, %struct.X* %p, i64 0, i32 1 %val = bitcast i64* %B to i8* Index: test/CodeGen/AArch64/merge-store.ll =================================================================== --- test/CodeGen/AArch64/merge-store.ll +++ test/CodeGen/AArch64/merge-store.ll @@ -5,8 +5,9 @@ @g0 = external global <3 x float>, align 16 @g1 = external global <3 x float>, align 4 -; CHECK: ldr s[[R0:[0-9]+]], {{\[}}[[R1:x[0-9]+]]{{\]}}, #4 -; CHECK: ld1{{\.?s?}} { v[[R0]]{{\.?s?}} }[1], {{\[}}[[R1]]{{\]}} +; CHECK: ldr q[[R0:[0-9]+]], {{\[}}[[R1:x[0-9]+]], :lo12:g0 +;; TODO: this next line seems like a redundant no-op move? +; CHECK: ins v0.s[1], v0.s[1] ; CHECK: str d[[R0]] define void @blam() { Index: test/CodeGen/AArch64/vector_merge_dep_check.ll =================================================================== --- test/CodeGen/AArch64/vector_merge_dep_check.ll +++ test/CodeGen/AArch64/vector_merge_dep_check.ll @@ -1,5 +1,4 @@ -; RUN: llc --combiner-alias-analysis=false < %s | FileCheck %s -; RUN: llc --combiner-alias-analysis=true < %s | FileCheck %s +; RUN: llc < %s | FileCheck %s ; This test checks that we do not merge stores together which have ; dependencies through their non-chain operands (e.g. one store is the Index: test/CodeGen/AMDGPU/merge-stores.ll =================================================================== --- test/CodeGen/AMDGPU/merge-stores.ll +++ test/CodeGen/AMDGPU/merge-stores.ll @@ -1,8 +1,5 @@ -; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-NOAA %s -; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-NOAA %s - -; RUN: llc -march=amdgcn -verify-machineinstrs -combiner-alias-analysis < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s -; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -combiner-alias-analysis < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s +; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s ; Run with devices with different unaligned load restrictions. 
@@ -148,17 +145,10 @@ ret void } -; FIXME: Should be able to merge this ; GCN-LABEL: {{^}}merge_global_store_4_constants_mixed_i32_f32: -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v - -; GCN-AA: buffer_store_dwordx2 -; GCN-AA: buffer_store_dword v -; GCN-AA: buffer_store_dword v - +; GCN: buffer_store_dwordx2 +; GCN: buffer_store_dword v +; GCN: buffer_store_dword v ; GCN: s_endpgm define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 { %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1 @@ -477,17 +467,9 @@ ret void } -; This works once AA is enabled on the subtarget ; GCN-LABEL: {{^}}merge_global_store_4_vector_elts_loads_v4i32: ; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]] - -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v -; GCN-NOAA: buffer_store_dword v - -; GCN-AA: buffer_store_dwordx4 [[LOAD]] - +; GCN: buffer_store_dwordx4 [[LOAD]] ; GCN: s_endpgm define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 { %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1 Index: test/CodeGen/AMDGPU/private-element-size.ll =================================================================== --- test/CodeGen/AMDGPU/private-element-size.ll +++ test/CodeGen/AMDGPU/private-element-size.ll @@ -32,10 +32,10 @@ ; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:24{{$}} ; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:28{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}} -; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}} +; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}} define void @private_elt_size_v4i32(<4 x i32> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() Index: test/CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll =================================================================== --- test/CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll +++ test/CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll @@ -42,10 +42,7 @@ ; GCN-LABEL: {{^}}stored_vector_pointer_value: ; GCN-DAG: s_mov_b32 s{{[0-9]+}}, SCRATCH_RSRC_DWORD0 ; GCN-DAG: s_mov_b32 s{{[0-9]+}}, SCRATCH_RSRC_DWORD1 -; GCN: buffer_store_dword -; GCN: buffer_store_dword -; GCN: buffer_store_dword -; GCN: buffer_store_dword +; GCN: buffer_store_dwordx4 define void @stored_vector_pointer_value(i32* addrspace(1)* %out, i32 %index) { entry: %tmp0 = alloca [4 x i32] Index: test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll =================================================================== --- test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll +++ test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll @@ -10,9 +10,11 @@ @stored_global_ptr = addrspace(3) global i32 addrspace(1)* undef, align 8 ; FUNC-LABEL: 
@reorder_local_load_global_store_local_load -; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4 -; CI-NEXT: buffer_store_dword -; CI-NEXT: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8 +; CI-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4 +; CI-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8 +; CI-DAG: buffer_store_dword +; CI-DAG: buffer_store_dword + define void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 { %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4 @@ -156,13 +158,10 @@ } ; FUNC-LABEL: @reorder_local_offsets -; FIXME: The scheduler doesn't think its proftible to re-order the -; loads and stores, and I'm not sure that it really is. -; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12 -; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:400 -; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:404 -; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:400 -; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:404 +; CI-DAG: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12 +; CI-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:400 +; CI-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:404 +; CI-DAG: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset0:100 offset1:101 ; CI: buffer_store_dword ; CI: s_endpgm define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 { @@ -184,12 +183,10 @@ } ; FUNC-LABEL: @reorder_global_offsets -; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12 -; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400 -; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:404 -; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400 -; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12 -; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:404 +; CI-DAG: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12 +; CI-DAG: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400 +; CI-DAG: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:404 +; CI-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400 ; CI: buffer_store_dword ; CI: s_endpgm define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 { Index: test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll =================================================================== --- test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll +++ test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll @@ -3,6 +3,13 @@ ; RUN: llc -march=amdgcn -mcpu=hawaii -mtriple=amdgcn-unknown-amdhsa -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CIHSA -check-prefix=HSA %s ; RUN: llc -march=amdgcn -mcpu=fiji -mtriple=amdgcn-unknown-amdhsa -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VIHSA -check-prefix=HSA %s +; FIXME: this fails because the load generated from extractelement is +;; now properly recognized as forwardable to the value stored in +;; insertelement, and thus the loads/stores drop away entirely. This +;; makes the intended test, of running out of registers, not occur. 
+ +;; XFAIL: * + ; This ends up using all 256 registers and requires register ; scavenging which will fail to find an unsued register. Index: test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll =================================================================== --- test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll +++ test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll @@ -1,6 +1,12 @@ ; RUN: llc -march=amdgcn -mcpu=tahiti -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s ; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s +;; FIXME: this fails because the load generated from extractelement is +;; now properly recognized as forwardable to the value stored in +;; insertelement, and thus the loads/stores drop away entirely. This +;; makes the intended test, of running out of registers, not occur. +;; XFAIL: * + ; This ends up using all 255 registers and requires register ; scavenging which will fail to find an unsued register. @@ -28,12 +34,12 @@ define amdgpu_vs void @main([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <16 x i8>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) { bb: %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i64 0, i64 0 - %tmp11 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, align 16, !tbaa !0 + %tmp11 = load volatile <16 x i8>, <16 x i8> addrspace(2)* %tmp, align 16, !tbaa !0 %tmp12 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 0) %tmp13 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 16) %tmp14 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 32) %tmp15 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 0 - %tmp16 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp15, align 16, !tbaa !0 + %tmp16 = load volatile <16 x i8>, <16 x i8> addrspace(2)* %tmp15, align 16, !tbaa !0 %tmp17 = add i32 %arg5, %arg7 %tmp18 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp16, i32 0, i32 %tmp17) %tmp19 = extractelement <4 x float> %tmp18, i32 0 Index: test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll =================================================================== --- test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll +++ test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll @@ -12,7 +12,8 @@ entry: ; CHECK: sub sp, sp, #12 ; CHECK: sub sp, sp, #4 -; CHECK: stmib sp, {r1, r2, r3} +; CHECK: add r0, sp, #4 +; CHECK: stm sp, {r0, r1, r2, r3} %g = alloca i8* %g1 = bitcast i8** %g to i8* call void @llvm.va_start(i8* %g1) Index: test/CodeGen/ARM/alloc-no-stack-realign.ll =================================================================== --- test/CodeGen/ARM/alloc-no-stack-realign.ll +++ test/CodeGen/ARM/alloc-no-stack-realign.ll @@ -51,12 +51,12 @@ ; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] +; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] ; REALIGN: orr r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] ; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #32 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] -; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #16 -; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128] +; REALIGN: orr r[[R1:[0-9]+]], r[[R1]], #16 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128] ; REALIGN: 
add r[[R1:[0-9]+]], r[[R0:0]], #48 Index: test/CodeGen/ARM/ifcvt10.ll =================================================================== --- test/CodeGen/ARM/ifcvt10.ll +++ test/CodeGen/ARM/ifcvt10.ll @@ -10,8 +10,6 @@ ; CHECK: vpop {d8} ; CHECK-NOT: vpopne ; CHECK: pop {r7, pc} -; CHECK: vpop {d8} -; CHECK: pop {r7, pc} br i1 undef, label %if.else, label %if.then if.then: ; preds = %entry Index: test/CodeGen/ARM/memset-inline.ll =================================================================== --- test/CodeGen/ARM/memset-inline.ll +++ test/CodeGen/ARM/memset-inline.ll @@ -3,9 +3,15 @@ define void @t1(i8* nocapture %c) nounwind optsize { entry: ; CHECK-LABEL: t1: + +;; FIXME: like with arm64-memset-inline.ll, learning how to merge +;; stores made this code worse, since it now uses a vector move, +;; instead of just using an strd instruction taking two registers. + +; CHECK: vmov.i32 d16, #0x0 +; CHECK: vst1.32 {d16}, [r0:64]! ; CHECK: movs r1, #0 -; CHECK: strd r1, r1, [r0] -; CHECK: str r1, [r0, #8] +; CHECK: str r1, [r0] call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false) ret void } Index: test/CodeGen/ARM/static-addr-hoisting.ll =================================================================== --- test/CodeGen/ARM/static-addr-hoisting.ll +++ test/CodeGen/ARM/static-addr-hoisting.ll @@ -6,9 +6,9 @@ ; CHECK: movs [[VAL:r[0-9]+]], #42 ; CHECK: movt r[[BASE1]], #15 -; CHECK: str [[VAL]], [r[[BASE1]]] -; CHECK: str [[VAL]], [r[[BASE1]], #24] -; CHECK: str.w [[VAL]], [r[[BASE1]], #42] +; CHECK-DAG: str [[VAL]], [r[[BASE1]]] +; CHECK-DAG: str [[VAL]], [r[[BASE1]], #24] +; CHECK-DAG: str.w [[VAL]], [r[[BASE1]], #42] ; CHECK: movw r[[BASE2:[0-9]+]], #20394 ; CHECK: movt r[[BASE2]], #18 Index: test/CodeGen/BPF/undef.ll =================================================================== --- test/CodeGen/BPF/undef.ll +++ test/CodeGen/BPF/undef.ll @@ -12,51 +12,51 @@ @llvm.used = appending global [6 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @_license, i32 0, i32 0), i8* bitcast (i32 (%struct.__sk_buff*)* @ebpf_filter to i8*), i8* bitcast (%struct.bpf_map_def* @routing to i8*), i8* bitcast (%struct.bpf_map_def* @routing_miss_0 to i8*), i8* bitcast (%struct.bpf_map_def* @test1 to i8*), i8* bitcast (%struct.bpf_map_def* @test1_miss_4 to i8*)], section "llvm.metadata" ; Function Attrs: nounwind uwtable +; CHECK: mov r2, r10 +; CHECK: addi r2, -2 +; CHECK: mov r1, 0 +; CHECK: sth 6(r2), r1 +; CHECK: sth 4(r2), r1 +; CHECK: sth 2(r2), r1 +; CHECK: mov r2, 6 +; CHECK: stb -7(r10), r2 +; CHECK: mov r2, 5 +; CHECK: stb -8(r10), r2 +; CHECK: mov r2, 7 +; CHECK: stb -6(r10), r2 +; CHECK: mov r2, 8 +; CHECK: stb -5(r10), r2 +; CHECK: mov r2, 9 +; CHECK: stb -4(r10), r2 +; CHECK: mov r2, 10 +; CHECK: stb -3(r10), r2 +; CHECK: sth 24(r10), r1 +; CHECK: sth 22(r10), r1 +; CHECK: sth 20(r10), r1 +; CHECK: sth 18(r10), r1 +; CHECK: sth 16(r10), r1 +; CHECK: sth 14(r10), r1 +; CHECK: sth 12(r10), r1 +; CHECK: sth 10(r10), r1 +; CHECK: sth 8(r10), r1 +; CHECK: sth 6(r10), r1 +; CHECK: sth -2(r10), r1 +; CHECK: sth 26(r10), r1 define i32 @ebpf_filter(%struct.__sk_buff* nocapture readnone %ebpf_packet) #0 section "socket1" { %key = alloca %struct.routing_key_2, align 1 %1 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 0 -; CHECK: mov r1, 5 -; CHECK: stb -8(r10), r1 store i8 5, i8* %1, align 1 %2 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 1 -; CHECK: mov r1, 6 -; CHECK: stb -7(r10), r1 
store i8 6, i8* %2, align 1 %3 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 2 -; CHECK: mov r1, 7 -; CHECK: stb -6(r10), r1 store i8 7, i8* %3, align 1 %4 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 3 -; CHECK: mov r1, 8 -; CHECK: stb -5(r10), r1 store i8 8, i8* %4, align 1 %5 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 4 -; CHECK: mov r1, 9 -; CHECK: stb -4(r10), r1 store i8 9, i8* %5, align 1 %6 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 5 -; CHECK: mov r1, 10 -; CHECK: stb -3(r10), r1 store i8 10, i8* %6, align 1 %7 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 1, i32 0, i64 0 -; CHECK: mov r1, r10 -; CHECK: addi r1, -2 -; CHECK: mov r2, 0 -; CHECK: sth 6(r1), r2 -; CHECK: sth 4(r1), r2 -; CHECK: sth 2(r1), r2 -; CHECK: sth 24(r10), r2 -; CHECK: sth 22(r10), r2 -; CHECK: sth 20(r10), r2 -; CHECK: sth 18(r10), r2 -; CHECK: sth 16(r10), r2 -; CHECK: sth 14(r10), r2 -; CHECK: sth 12(r10), r2 -; CHECK: sth 10(r10), r2 -; CHECK: sth 8(r10), r2 -; CHECK: sth 6(r10), r2 -; CHECK: sth -2(r10), r2 -; CHECK: sth 26(r10), r2 call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 30, i32 1, i1 false) %8 = call i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...) bitcast (i32 (...)* @bpf_map_lookup_elem to i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...)*)(%struct.bpf_map_def* nonnull @routing, %struct.routing_key_2* nonnull %key) #3 ret i32 undef Index: test/CodeGen/MSP430/Inst16mm.ll =================================================================== --- test/CodeGen/MSP430/Inst16mm.ll +++ test/CodeGen/MSP430/Inst16mm.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=msp430 -combiner-alias-analysis < %s | FileCheck %s +; RUN: llc -march=msp430 < %s | FileCheck %s target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8" target triple = "msp430-generic-generic" @foo = common global i16 0, align 2 Index: test/CodeGen/Mips/cconv/arguments-float.ll =================================================================== --- test/CodeGen/Mips/cconv/arguments-float.ll +++ test/CodeGen/Mips/cconv/arguments-float.ll @@ -63,39 +63,39 @@ ; NEW-DAG: sd $5, 16([[R2]]) ; O32 has run out of argument registers and starts using the stack -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 16($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 20($sp) ; O32-DAG: sw [[R3]], 24([[R2]]) ; O32-DAG: sw [[R4]], 28([[R2]]) ; NEW-DAG: sd $6, 24([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp) ; O32-DAG: sw [[R3]], 32([[R2]]) ; O32-DAG: sw [[R4]], 36([[R2]]) ; NEW-DAG: sd $7, 32([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp) ; O32-DAG: sw [[R3]], 40([[R2]]) ; O32-DAG: sw [[R4]], 44([[R2]]) ; NEW-DAG: sd $8, 40([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp) -; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp) ; O32-DAG: sw [[R3]], 48([[R2]]) ; O32-DAG: sw [[R4]], 52([[R2]]) ; NEW-DAG: sd $9, 48([[R2]]) -; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 56($sp) -; O32-DAG: lw 
[[R4:\$([0-9]+|gp)]], 60($sp) +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp) ; O32-DAG: sw [[R3]], 56([[R2]]) ; O32-DAG: sw [[R4]], 60([[R2]]) ; NEW-DAG: sd $10, 56([[R2]]) ; N32/N64 have run out of registers and starts using the stack too -; O32-DAG: lw [[R3:\$[0-9]+]], 64($sp) -; O32-DAG: lw [[R4:\$[0-9]+]], 68($sp) +; O32-DAG: lw [[R3:\$[0-9]+]], 56($sp) +; O32-DAG: lw [[R4:\$[0-9]+]], 60($sp) ; O32-DAG: sw [[R3]], 64([[R2]]) ; O32-DAG: sw [[R4]], 68([[R2]]) ; NEW-DAG: ld [[R3:\$[0-9]+]], 0($sp) Index: test/CodeGen/Mips/cconv/arguments-varargs.ll =================================================================== --- test/CodeGen/Mips/cconv/arguments-varargs.ll +++ test/CodeGen/Mips/cconv/arguments-varargs.ll @@ -315,12 +315,11 @@ ; Big-endian mode for N32/N64 must add an additional 4 to the offset due to byte ; order. ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -349,10 +348,9 @@ ; Load the second argument from the variable portion and copy it to the global. ; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) @@ -678,12 +676,11 @@ ; Big-endian mode for N32/N64 must add an additional 4 to the offset due to byte ; order. ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -712,10 +709,9 @@ ; Load the second argument from the variable portion and copy it to the global. 
; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 ; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) @@ -1040,10 +1036,9 @@ ; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) ; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG1]], 8([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG1]], 12([[GV]]) ; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords) @@ -1072,10 +1067,9 @@ ; Load the second argument from the variable portion and copy it to the global. ; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) ; O32-DAG: sw [[ARG2]], 16([[GV]]) -; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]]) -; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4 -; O32-DAG: sw [[VA2]], 0([[SP]]) -; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]]) +; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4 +; O32-DAG: sw [[VA3]], 0([[SP]]) +; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]]) ; O32-DAG: sw [[ARG2]], 20([[GV]]) ; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]]) Index: test/CodeGen/Mips/fastcc.ll =================================================================== --- test/CodeGen/Mips/fastcc.ll +++ test/CodeGen/Mips/fastcc.ll @@ -132,20 +132,19 @@ define internal fastcc void @callee0(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15, i32 %a16) nounwind noinline { entry: ; CHECK: callee0 -; CHECK: sw $4 -; CHECK: sw $5 -; CHECK: sw $6 -; CHECK: sw $7 -; CHECK: sw $8 -; CHECK: sw $9 -; CHECK: sw $10 -; CHECK: sw $11 -; CHECK: sw $12 -; CHECK: sw $13 -; CHECK: sw $14 -; CHECK: sw $15 -; CHECK: sw $24 -; CHECK: sw $3 +; CHECK-DAG: sw $4 +; CHECK-DAG: sw $5 +; CHECK-DAG: sw $7 +; CHECK-DAG: sw $8 +; CHECK-DAG: sw $9 +; CHECK-DAG: sw $10 +; CHECK-DAG: sw $11 +; CHECK-DAG: sw $12 +; CHECK-DAG: sw $13 +; CHECK-DAG: sw $14 +; CHECK-DAG: sw $15 +; CHECK-DAG: sw $24 +; CHECK-DAG: sw $3 ; t6, t7 and t8 are reserved in NaCl and cannot be used for fastcc. 
; CHECK-NACL-NOT: sw $14 @@ -223,27 +222,27 @@ define internal fastcc void @callee1(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15, float %a16, float %a17, float %a18, float %a19, float %a20) nounwind noinline { entry: -; CHECK: callee1 -; CHECK: swc1 $f0 -; CHECK: swc1 $f1 -; CHECK: swc1 $f2 -; CHECK: swc1 $f3 -; CHECK: swc1 $f4 -; CHECK: swc1 $f5 -; CHECK: swc1 $f6 -; CHECK: swc1 $f7 -; CHECK: swc1 $f8 -; CHECK: swc1 $f9 -; CHECK: swc1 $f10 -; CHECK: swc1 $f11 -; CHECK: swc1 $f12 -; CHECK: swc1 $f13 -; CHECK: swc1 $f14 -; CHECK: swc1 $f15 -; CHECK: swc1 $f16 -; CHECK: swc1 $f17 -; CHECK: swc1 $f18 -; CHECK: swc1 $f19 +; CHECK-LABEL: callee1: +; CHECK-DAG: swc1 $f0 +; CHECK-DAG: swc1 $f1 +; CHECK-DAG: swc1 $f2 +; CHECK-DAG: swc1 $f3 +; CHECK-DAG: swc1 $f4 +; CHECK-DAG: swc1 $f5 +; CHECK-DAG: swc1 $f6 +; CHECK-DAG: swc1 $f7 +; CHECK-DAG: swc1 $f8 +; CHECK-DAG: swc1 $f9 +; CHECK-DAG: swc1 $f10 +; CHECK-DAG: swc1 $f11 +; CHECK-DAG: swc1 $f12 +; CHECK-DAG: swc1 $f13 +; CHECK-DAG: swc1 $f14 +; CHECK-DAG: swc1 $f15 +; CHECK-DAG: swc1 $f16 +; CHECK-DAG: swc1 $f17 +; CHECK-DAG: swc1 $f18 +; CHECK-DAG: swc1 $f19 store float %a0, float* @gf0, align 4 store float %a1, float* @gf1, align 4 @@ -316,8 +315,6 @@ ; NOODDSPREG-LABEL: callee2: -; NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]] - ; Check that first 10 arguments are received in even float registers ; f0, f2, ... , f18. Check that 11th argument is received on stack. @@ -333,7 +330,7 @@ ; NOODDSPREG-DAG: swc1 $f16, 32($[[R0]]) ; NOODDSPREG-DAG: swc1 $f18, 36($[[R0]]) -; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp) +; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 0($sp) ; NOODDSPREG-DAG: swc1 $[[F0]], 40($[[R0]]) store float %a0, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4 @@ -397,7 +394,6 @@ ; FP64-NOODDSPREG-LABEL: callee3: -; FP64-NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]] ; Check that first 10 arguments are received in even float registers ; f0, f2, ... , f18. Check that 11th argument is received on stack. 
@@ -414,7 +410,7 @@ ; FP64-NOODDSPREG-DAG: sdc1 $f16, 64($[[R0]]) ; FP64-NOODDSPREG-DAG: sdc1 $f18, 72($[[R0]]) -; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp) +; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], 0($sp) ; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 80($[[R0]]) store double %a0, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8 Index: test/CodeGen/Mips/load-store-left-right.ll =================================================================== --- test/CodeGen/Mips/load-store-left-right.ll +++ test/CodeGen/Mips/load-store-left-right.ll @@ -250,12 +250,18 @@ ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)( ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)( -; FIXME: We should be able to do better than this on MIPS32r6/MIPS64r6 since -; we have unaligned halfword load/store available -; ALL-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; ALL-DAG: sb $[[R1]], 2($[[PTR]]) -; ALL-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; ALL-DAG: sb $[[R1]], 3($[[PTR]]) +; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-DAG: sb $[[R1]], 2($[[PTR]]) +; MIPS32-DAG: lbu $[[R2:[0-9]+]], 1($[[PTR]]) +; MIPS32-DAG: sb $[[R2]], 3($[[PTR]]) + +; MIPS32R6: lhu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32R6: sh $[[R1]], 2($[[PTR]]) + +; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-DAG: sb $[[R1]], 2($[[PTR]]) +; MIPS64-DAG: lbu $[[R2:[0-9]+]], 1($[[PTR]]) +; MIPS64-DAG: sb $[[R2]], 3($[[PTR]]) %0 = load %struct.S0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 0), align 1 store %struct.S0 %0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 1), align 1 @@ -268,37 +274,54 @@ ; MIPS32-EL: lw $[[PTR:[0-9]+]], %got(struct_s1)( ; MIPS32-EB: lw $[[PTR:[0-9]+]], %got(struct_s1)( -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 4($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 5($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 6($[[PTR]]) -; MIPS32-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS32-DAG: sb $[[R1]], 7($[[PTR]]) +; MIPS32-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS32-EL-DAG: lwr $[[R1]], 0($[[PTR]]) +; MIPS32-EL-DAG: swl $[[R1]], 7($[[PTR]]) +; MIPS32-EL-DAG: swr $[[R1]], 4($[[PTR]]) +; MIPS32-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-EB-DAG: lwr $[[R1]], 3($[[PTR]]) +; MIPS32-EB-DAG: swl $[[R1]], 4($[[PTR]]) +; MIPS32-EB-DAG: swr $[[R1]], 7($[[PTR]]) + +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 4($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 5($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 6($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 7($[[PTR]]) ; MIPS32R6: lw $[[PTR:[0-9]+]], %got(struct_s1)( -; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS32R6-DAG: sh $[[R1]], 4($[[PTR]]) -; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS32R6-DAG: sh $[[R1]], 6($[[PTR]]) +; MIPS32R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS32R6-DAG: sw $[[R1]], 4($[[PTR]]) ; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 4($[[PTR]]) -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 5($[[PTR]]) -; MIPS64-DAG: lbu 
$[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 6($[[PTR]]) -; MIPS64-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS64-DAG: sb $[[R1]], 7($[[PTR]]) + +; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) +; MIPS64-EL-DAG: swl $[[R1]], 7($[[PTR]]) +; MIPS64-EL-DAG: swr $[[R1]], 4($[[PTR]]) + +; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) +; MIPS64-EB-DAG: swl $[[R1]], 4($[[PTR]]) +; MIPS64-EB-DAG: swr $[[R1]], 7($[[PTR]]) + + +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 4($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 5($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 6($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]]) +; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 7($[[PTR]]) ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)( -; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64R6-DAG: sh $[[R1]], 4($[[PTR]]) -; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]]) -; MIPS64R6-DAG: sh $[[R1]], 6($[[PTR]]) +; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64R6-DAG: sw $[[R1]], 4($[[PTR]]) %0 = load %struct.S1, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 0), align 1 store %struct.S1 %0, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 1), align 1 @@ -336,30 +359,21 @@ ; MIPS32R6-DAG: sw $[[R1]], 12($[[PTR]]) ; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) -; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) -; MIPS64-EL-DAG: swl $[[R1]], 11($[[PTR]]) -; MIPS64-EL-DAG: swr $[[R1]], 8($[[PTR]]) -; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 7($[[PTR]]) -; MIPS64-EL-DAG: lwr $[[R1]], 4($[[PTR]]) -; MIPS64-EL-DAG: swl $[[R1]], 15($[[PTR]]) -; MIPS64-EL-DAG: swr $[[R1]], 12($[[PTR]]) + +; MIPS64-EL-DAG: ldl $[[R1:[0-9]+]], 7($[[PTR]]) +; MIPS64-EL-DAG: ldr $[[R1]], 0($[[PTR]]) +; MIPS64-EL-DAG: sdl $[[R1]], 15($[[PTR]]) +; MIPS64-EL-DAG: sdr $[[R1]], 8($[[PTR]]) ; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]]) -; MIPS64-EB-DAG: swl $[[R1]], 8($[[PTR]]) -; MIPS64-EB-DAG: swr $[[R1]], 11($[[PTR]]) -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 4($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 7($[[PTR]]) -; MIPS64-EB-DAG: swl $[[R1]], 12($[[PTR]]) -; MIPS64-EB-DAG: swr $[[R1]], 15($[[PTR]]) +; MIPS64-EB-DAG: ldl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB-DAG: ldr $[[R1]], 7($[[PTR]]) +; MIPS64-EB-DAG: sdl $[[R1]], 8($[[PTR]]) +; MIPS64-EB-DAG: sdr $[[R1]], 15($[[PTR]]) ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)( -; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64R6-DAG: sw $[[R1]], 8($[[PTR]]) -; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 4($[[PTR]]) -; MIPS64R6-DAG: sw $[[R1]], 12($[[PTR]]) +; MIPS64R6-DAG: ld $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64R6-DAG: sd $[[R1]], 8($[[PTR]]) %0 = load %struct.S2, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 0), align 1 store %struct.S2 %0, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 1), align 1 @@ -416,19 +430,19 @@ ; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]]) ; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]]) -; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)( -; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]]) -; MIPS64-EB-DAG: lwr $[[R1]], 
3($[[PTR]]) -; MIPS64-EB-DAG: dsll $[[R1]], $[[R1]], 32 -; MIPS64-EB-DAG: lbu $[[R2:[0-9]+]], 5($[[PTR]]) -; MIPS64-EB-DAG: lbu $[[R3:[0-9]+]], 4($[[PTR]]) -; MIPS64-EB-DAG: dsll $[[T0:[0-9]+]], $[[R3]], 8 -; MIPS64-EB-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[R2]] -; MIPS64-EB-DAG: dsll $[[T1]], $[[T1]], 16 -; MIPS64-EB-DAG: or $[[T3:[0-9]+]], $[[R1]], $[[T1]] -; MIPS64-EB-DAG: lbu $[[R4:[0-9]+]], 6($[[PTR]]) -; MIPS64-EB-DAG: dsll $[[T4:[0-9]+]], $[[R4]], 8 -; MIPS64-EB-DAG: or $4, $[[T3]], $[[T4]] +; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)( +; MIPS64-EB: lbu $[[R2:[0-9]+]], 5($[[PTR]]) +; MIPS64-EB: lbu $[[R3:[0-9]+]], 4($[[PTR]]) +; MIPS64-EB: dsll $[[T0:[0-9]+]], $[[R3]], 8 +; MIPS64-EB: or $[[T1:[0-9]+]], $[[T0]], $[[R2]] +; MIPS64-EB: lbu $[[R4:[0-9]+]], 6($[[PTR]]) +; MIPS64-EB: dsll $[[T1]], $[[T1]], 16 +; MIPS64-EB: lwl $[[R1:[0-9]+]], 0($[[PTR]]) +; MIPS64-EB: lwr $[[R1]], 3($[[PTR]]) +; MIPS64-EB: dsll $[[R5:[0-9]+]], $[[R1]], 32 +; MIPS64-EB: or $[[T3:[0-9]+]], $[[R5]], $[[T1]] +; MIPS64-EB: dsll $[[T4:[0-9]+]], $[[R4]], 8 +; MIPS64-EB: or $4, $[[T3]], $[[T4]] ; MIPS64R6: ld $[[SPTR:[0-9]+]], %got_disp(arr)( Index: test/CodeGen/Mips/micromips-li.ll =================================================================== --- test/CodeGen/Mips/micromips-li.ll +++ test/CodeGen/Mips/micromips-li.ll @@ -13,6 +13,6 @@ ret i32 0 } -; CHECK: li16 ${{[2-7]|16|17}}, 1 ; CHECK: addiu ${{[0-9]+}}, $zero, 2148 +; CHECK: li16 ${{[2-7]|16|17}}, 1 ; CHECK: ori ${{[0-9]+}}, $zero, 33332 Index: test/CodeGen/Mips/mips64-f128.ll =================================================================== --- test/CodeGen/Mips/mips64-f128.ll +++ test/CodeGen/Mips/mips64-f128.ll @@ -573,10 +573,10 @@ ; ALL-LABEL: store_LD_LD: ; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1) -; ALL: ld $[[R1:[0-9]+]], 0($[[R0]]) ; ALL: ld $[[R2:[0-9]+]], 8($[[R0]]) ; ALL: ld $[[R3:[0-9]+]], %got_disp(gld0) ; ALL: sd $[[R2]], 8($[[R3]]) +; ALL: ld $[[R1:[0-9]+]], 0($[[R0]]) ; ALL: sd $[[R1]], 0($[[R3]]) define void @store_LD_LD() { Index: test/CodeGen/Mips/o32_cc_byval.ll =================================================================== --- test/CodeGen/Mips/o32_cc_byval.ll +++ test/CodeGen/Mips/o32_cc_byval.ll @@ -45,20 +45,18 @@ define void @f2(float %f, %struct.S1* nocapture byval %s1) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 -; CHECK: sw $7, 60($sp) -; CHECK: sw $6, 56($sp) -; CHECK: lw $4, 80($sp) -; CHECK: ldc1 $f[[F0:[0-9]+]], 72($sp) -; CHECK: lw $[[R3:[0-9]+]], 64($sp) -; CHECK: lw $[[R4:[0-9]+]], 68($sp) -; CHECK: lw $[[R2:[0-9]+]], 60($sp) -; CHECK: lh $[[R1:[0-9]+]], 58($sp) -; CHECK: lb $[[R0:[0-9]+]], 56($sp) -; CHECK: sw $[[R0]], 32($sp) -; CHECK: sw $[[R1]], 28($sp) -; CHECK: sw $[[R2]], 24($sp) -; CHECK: sw $[[R4]], 20($sp) -; CHECK: sw $[[R3]], 16($sp) +; CHECK-DAG: sw $7, 60($sp) +; CHECK-DAG: sw $6, 56($sp) +; CHECK-DAG: ldc1 $f[[F0:[0-9]+]], 72($sp) +; CHECK-DAG: lw $[[R3:[0-9]+]], 64($sp) +; CHECK-DAG: lw $[[R4:[0-9]+]], 68($sp) +; CHECK-DAG: lh $[[R1:[0-9]+]], 58($sp) +; CHECK-DAG: lb $[[R0:[0-9]+]], 56($sp) +; CHECK-DAG: sw $[[R0]], 32($sp) +; CHECK-DAG: sw $[[R1]], 28($sp) +; CHECK-DAG: sw $[[R4]], 20($sp) +; CHECK-DAG: sw $[[R3]], 16($sp) +; CHECK-DAG: sw $7, 24($sp) ; CHECK: mfc1 $6, $f[[F0]] %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5 @@ -86,9 +84,7 @@ ; CHECK: sw $6, 56($sp) ; CHECK: sw $5, 52($sp) ; CHECK: sw $4, 48($sp) -; CHECK: lw $4, 48($sp) -; CHECK: lw $[[R0:[0-9]+]], 60($sp) -; CHECK: sw $[[R0]], 24($sp) +; CHECK: sw $7, 24($sp) %arrayidx = getelementptr inbounds 
%struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0 %tmp = load i32, i32* %arrayidx, align 4 @@ -101,14 +97,14 @@ define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture byval %s1) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 -; CHECK: sw $7, 60($sp) -; CHECK: sw $6, 56($sp) -; CHECK: sw $5, 52($sp) -; CHECK: lw $4, 60($sp) -; CHECK: lw $[[R1:[0-9]+]], 80($sp) -; CHECK: lb $[[R0:[0-9]+]], 52($sp) -; CHECK: sw $[[R0]], 32($sp) -; CHECK: sw $[[R1]], 24($sp) +; CHECK-DAG: sw $7, 60($sp) +; CHECK-DAG: sw $6, 56($sp) +; CHECK-DAG: sw $5, 52($sp) +; CHECK-DAG: lw $[[R1:[0-9]+]], 80($sp) +; CHECK-DAG: lb $[[R0:[0-9]+]], 52($sp) +; CHECK-DAG: sw $[[R0]], 32($sp) +; CHECK-DAG: sw $[[R1]], 24($sp) +; CHECK: move $4, $7 %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2 %tmp = load i32, i32* %i, align 4 Index: test/CodeGen/Mips/o32_cc_vararg.ll =================================================================== --- test/CodeGen/Mips/o32_cc_vararg.ll +++ test/CodeGen/Mips/o32_cc_vararg.ll @@ -29,9 +29,9 @@ ; CHECK-LABEL: va1: ; CHECK: addiu $sp, $sp, -16 +; CHECK: sw $5, 20($sp) ; CHECK: sw $7, 28($sp) ; CHECK: sw $6, 24($sp) -; CHECK: sw $5, 20($sp) ; CHECK: lw $2, 20($sp) } @@ -83,8 +83,8 @@ ; CHECK-LABEL: va3: ; CHECK: addiu $sp, $sp, -16 -; CHECK: sw $7, 28($sp) ; CHECK: sw $6, 24($sp) +; CHECK: sw $7, 28($sp) ; CHECK: lw $2, 24($sp) } Index: test/CodeGen/PowerPC/anon_aggr.ll =================================================================== --- test/CodeGen/PowerPC/anon_aggr.ll +++ test/CodeGen/PowerPC/anon_aggr.ll @@ -60,10 +60,9 @@ unequal: ret i8* %array2_ptr } - ; CHECK-LABEL: func2: -; CHECK: ld [[REG2:[0-9]+]], 72(1) -; CHECK: cmpld {{([0-9]+,)?}}4, [[REG2]] +; CHECK: cmpld {{([0-9]+,)?}}4, 6 +; CHECK: mr [[REG2:[0-9]+]], 6 ; CHECK-DAG: std [[REG2]], -[[OFFSET1:[0-9]+]] ; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]] ; CHECK: ld 3, -[[OFFSET2]](1) @@ -85,8 +84,8 @@ ; DARWIN64: mr ; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]] ; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]] -; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]] ; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]] +; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]] ; DARWIN64: ld r3, -[[OFFSET1]] ; DARWIN64: ld r3, -[[OFFSET2]] @@ -106,24 +105,24 @@ } ; CHECK-LABEL: func3: -; CHECK: ld [[REG3:[0-9]+]], 72(1) -; CHECK: ld [[REG4:[0-9]+]], 56(1) -; CHECK: cmpld {{([0-9]+,)?}}[[REG4]], [[REG3]] -; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1) +; CHECK: cmpld {{([0-9]+,)?}}4, 6 +; CHECK: mr [[REG3:[0-9]+]], 6 +; CHECK: mr [[REG4:[0-9]+]], 4 ; CHECK: std [[REG4]], -[[OFFSET2:[0-9]+]](1) +; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1) ; CHECK: ld 3, -[[OFFSET2]](1) ; CHECK: ld 3, -[[OFFSET1]](1) ; DARWIN32: _func3: -; DARWIN32: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36 -; DARWIN32: addi r[[REG2:[0-9]+]], r[[REGSP]], 24 -; DARWIN32: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]]) -; DARWIN32: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]]) +; DARWIN32-DAG: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36 +; DARWIN32-DAG: addi r[[REG2:[0-9]+]], r[[REGSP]], 24 +; DARWIN32-DAG: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]]) +; DARWIN32-DAG: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]]) ; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]] -; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]] -; DARWIN32: stw r[[REG4]], -[[OFFSET2:[0-9]+]] -; DARWIN32: lwz r3, -[[OFFSET2]] -; DARWIN32: lwz r3, -[[OFFSET1]] +; DARWIN32-DAG: stw r[[REG3]], -[[OFFSET1:[0-9]+]] +; DARWIN32-DAG: stw r[[REG4]], -[[OFFSET2:[0-9]+]] +; DARWIN32-DAG: lwz r3, -[[OFFSET1:[0-9]+]] +; 
DARWIN32-DAG: lwz r3, -[[OFFSET2:[0-9]+]] ; DARWIN64: _func3: ; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1) Index: test/CodeGen/PowerPC/builtins-ppc-p8vector.ll =================================================================== --- test/CodeGen/PowerPC/builtins-ppc-p8vector.ll +++ test/CodeGen/PowerPC/builtins-ppc-p8vector.ll @@ -26,8 +26,7 @@ ret void ; CHECK-LABEL: @test1 ; CHECK: lvx [[REG1:[0-9]+]], -; CHECK: lvx [[REG2:[0-9]+]], -; CHECK: vbpermq {{[0-9]+}}, [[REG2]], [[REG1]] +; CHECK: vbpermq {{[0-9]+}}, [[REG1]], [[REG1]] ; CHECK-VSX: vbpermq {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} } @@ -47,8 +46,7 @@ ret void ; CHECK-LABEL: @test2 ; CHECK: lvx [[REG1:[0-9]+]], -; CHECK: lvx [[REG2:[0-9]+]], -; CHECK: vbpermq {{[0-9]+}}, [[REG2]], [[REG1]] +; CHECK: vbpermq {{[0-9]+}}, [[REG1]], [[REG1]] ; CHECK-VSX: vbpermq {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} } Index: test/CodeGen/PowerPC/complex-return.ll =================================================================== --- test/CodeGen/PowerPC/complex-return.ll +++ test/CodeGen/PowerPC/complex-return.ll @@ -1,55 +1,25 @@ -; RUN: llc -mcpu=ppc64 -O0 < %s | FileCheck %s +; RUN: llc -mcpu=ppc64 < %s | FileCheck %s target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" target triple = "powerpc64-unknown-linux-gnu" define { ppc_fp128, ppc_fp128 } @foo() nounwind { entry: - %retval = alloca { ppc_fp128, ppc_fp128 }, align 16 - %x = alloca { ppc_fp128, ppc_fp128 }, align 16 - %real = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0 - %imag = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1 - store ppc_fp128 0xM400C0000000000000000000000000000, ppc_fp128* %real - store ppc_fp128 0xMC00547AE147AE1483CA47AE147AE147A, ppc_fp128* %imag - %x.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0 - %x.real = load ppc_fp128, ppc_fp128* %x.realp - %x.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1 - %x.imag = load ppc_fp128, ppc_fp128* %x.imagp - %real1 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 0 - %imag2 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 1 - store ppc_fp128 %x.real, ppc_fp128* %real1 - store ppc_fp128 %x.imag, ppc_fp128* %imag2 - %0 = load { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval - ret { ppc_fp128, ppc_fp128 } %0 + ret { ppc_fp128, ppc_fp128 } { ppc_fp128 0xM400C0000000000000000000000000000, ppc_fp128 0xMC00547AE147AE1483CA47AE147AE147A } } ; CHECK-LABEL: foo: -; CHECK: lfd 1 -; CHECK: lfd 2 -; CHECK: lfd 3 -; CHECK: lfd 4 +; CHECK-DAG: lfd 3 +; CHECK-DAG: lfd 4 +; CHECK-DAG: lfs 1 +; CHECK-DAG: lfs 2 define { float, float } @oof() nounwind { entry: - %retval = alloca { float, float }, align 4 - %x = alloca { float, float }, align 4 - %real = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 0 - %imag = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 1 - store float 3.500000e+00, float* %real - store float 0xC00547AE20000000, float* %imag - %x.realp = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 0 - %x.real = load float, float* %x.realp - %x.imagp = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 1 - %x.imag = load float, float* %x.imagp - %real1 = getelementptr inbounds { 
float, float }, { float, float }* %retval, i32 0, i32 0
-  %imag2 = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
-  store float %x.real, float* %real1
-  store float %x.imag, float* %imag2
-  %0 = load { float, float }, { float, float }* %retval
-  ret { float, float } %0
+  ret { float, float } { float 3.500000e+00, float 0xC00547AE20000000 }
 }

 ; CHECK-LABEL: oof:
-; CHECK: lfs 2
-; CHECK: lfs 1
+; CHECK-DAG: lfs 2
+; CHECK-DAG: lfs 1
Index: test/CodeGen/PowerPC/jaggedstructs.ll
===================================================================
--- test/CodeGen/PowerPC/jaggedstructs.ll
+++ test/CodeGen/PowerPC/jaggedstructs.ll
@@ -18,14 +18,14 @@
   ret void
 }

-; CHECK: std 6, 184(1)
-; CHECK: std 5, 176(1)
-; CHECK: std 4, 168(1)
-; CHECK: std 3, 160(1)
-; CHECK: lbz {{[0-9]+}}, 167(1)
-; CHECK: lhz {{[0-9]+}}, 165(1)
-; CHECK: stb {{[0-9]+}}, 55(1)
-; CHECK: sth {{[0-9]+}}, 53(1)
+; CHECK-DAG: std 3, 160(1)
+; CHECK-DAG: std 6, 184(1)
+; CHECK-DAG: std 5, 176(1)
+; CHECK-DAG: std 4, 168(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 167(1)
+; CHECK-DAG: lhz {{[0-9]+}}, 165(1)
+; CHECK-DAG: stb {{[0-9]+}}, 55(1)
+; CHECK-DAG: sth {{[0-9]+}}, 53(1)
 ; CHECK: lbz {{[0-9]+}}, 175(1)
 ; CHECK: lwz {{[0-9]+}}, 171(1)
 ; CHECK: stb {{[0-9]+}}, 63(1)
Index: test/CodeGen/PowerPC/ppc64-align-long-double.ll
===================================================================
--- test/CodeGen/PowerPC/ppc64-align-long-double.ll
+++ test/CodeGen/PowerPC/ppc64-align-long-double.ll
@@ -18,19 +18,35 @@
   ret ppc_fp128 %0
 }

-; CHECK-DAG: std 6, 72(1)
-; CHECK-DAG: std 5, 64(1)
-; CHECK-DAG: std 4, 56(1)
-; CHECK-DAG: std 3, 48(1)
-; CHECK: lfd 1, 64(1)
-; CHECK: lfd 2, 72(1)
+;; FIXME: Sadly, we now have an extra store to a temp variable here,
+;; which comes from (roughly):
+;;   store i64 <val> to i64* <frame>
+;;   bitcast (load i64* <frame>) to f64
+;; The code can now elide the load, making:
+;;   store i64 <val> -> <frame>
+;;   bitcast i64 <val> to f64
+;; Finally, the bitcast itself turns into a store/load pair.
+;;
+;; This behavior is new: previously, llvm was accidentally unable to
+;; detect that the load came directly from the store, and so could not
+;; elide it.
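+;;
+;; As a concrete sketch of that pattern (hypothetical IR for illustration;
+;; the names %val, %frame and %reload do not appear in this test), the
+;; store-to-load forwarding now rewrites
+;;   store i64 %val, i64* %frame
+;;   %reload = load i64, i64* %frame
+;;   %fp = bitcast i64 %reload to double
+;; into
+;;   store i64 %val, i64* %frame
+;;   %fp = bitcast i64 %val to double
+;; and the remaining i64-to-f64 bitcast is then lowered through a stack
+;; store/load pair of its own.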
-; CHECK-VSX-DAG: std 6, 72(1) -; CHECK-VSX-DAG: std 5, 64(1) -; CHECK-VSX-DAG: std 4, 56(1) -; CHECK-VSX-DAG: std 3, 48(1) -; CHECK-VSX: li 3, 16 -; CHECK-VSX: addi 4, 1, 48 -; CHECK-VSX: lxsdx 1, 4, 3 -; CHECK-VSX: li 3, 24 -; CHECK-VSX: lxsdx 2, 4, 3 +; CHECK: std 6, 72(1) +; CHECK: std 5, 64(1) +; CHECK: std 4, 56(1) +; CHECK: std 3, 48(1) +; CHECK: std 5, -16(1) +; CHECK: std 6, -8(1) +; CHECK: lfd 1, -16(1) +; CHECK: lfd 2, -8(1) + +; CHECK-VSX: std 6, 72(1) +; CHECK-VSX: std 5, 64(1) +; CHECK-VSX: std 4, 56(1) +; CHECK-VSX: std 3, 48(1) +; CHECK-VSX: std 5, -16(1) +; CHECK-VSX: std 6, -8(1) +; CHECK-VSX: addi 3, 1, -16 +; CHECK-VSX: lxsdx 1, 0, 3 +; CHECK-VSX: addi 3, 1, -8 +; CHECK-VSX: lxsdx 2, 0, 3 Index: test/CodeGen/PowerPC/structsinmem.ll =================================================================== --- test/CodeGen/PowerPC/structsinmem.ll +++ test/CodeGen/PowerPC/structsinmem.ll @@ -113,13 +113,13 @@ %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: lha {{[0-9]+}}, 126(1) -; CHECK: lha {{[0-9]+}}, 132(1) -; CHECK: lbz {{[0-9]+}}, 119(1) -; CHECK: lwz {{[0-9]+}}, 140(1) -; CHECK: lwz {{[0-9]+}}, 144(1) -; CHECK: lwz {{[0-9]+}}, 152(1) -; CHECK: lwz {{[0-9]+}}, 160(1) +; CHECK-DAG: lha {{[0-9]+}}, 126(1) +; CHECK-DAG: lha {{[0-9]+}}, 132(1) +; CHECK-DAG: lbz {{[0-9]+}}, 119(1) +; CHECK-DAG: lwz {{[0-9]+}}, 140(1) +; CHECK-DAG: lwz {{[0-9]+}}, 144(1) +; CHECK-DAG: lwz {{[0-9]+}}, 152(1) +; CHECK-DAG: lwz {{[0-9]+}}, 160(1) } define i32 @caller2() nounwind { @@ -205,11 +205,11 @@ %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: lha {{[0-9]+}}, 126(1) -; CHECK: lha {{[0-9]+}}, 133(1) -; CHECK: lbz {{[0-9]+}}, 119(1) -; CHECK: lwz {{[0-9]+}}, 140(1) -; CHECK: lwz {{[0-9]+}}, 147(1) -; CHECK: lwz {{[0-9]+}}, 154(1) -; CHECK: lwz {{[0-9]+}}, 161(1) +; CHECK-DAG: lha {{[0-9]+}}, 126(1) +; CHECK-DAG: lha {{[0-9]+}}, 133(1) +; CHECK-DAG: lbz {{[0-9]+}}, 119(1) +; CHECK-DAG: lwz {{[0-9]+}}, 140(1) +; CHECK-DAG: lwz {{[0-9]+}}, 147(1) +; CHECK-DAG: lwz {{[0-9]+}}, 154(1) +; CHECK-DAG: lwz {{[0-9]+}}, 161(1) } Index: test/CodeGen/PowerPC/structsinregs.ll =================================================================== --- test/CodeGen/PowerPC/structsinregs.ll +++ test/CodeGen/PowerPC/structsinregs.ll @@ -59,6 +59,7 @@ %call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7) ret i32 %call +; CHECK-LABEL: caller1 ; CHECK: ld 9, 112(31) ; CHECK: ld 8, 120(31) ; CHECK: ld 7, 128(31) @@ -97,20 +98,21 @@ %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: std 9, 96(1) -; CHECK: std 8, 88(1) -; CHECK: std 7, 80(1) -; CHECK: stw 6, 76(1) -; CHECK: stw 5, 68(1) -; CHECK: sth 4, 62(1) -; CHECK: stb 3, 55(1) -; CHECK: lha {{[0-9]+}}, 62(1) -; CHECK: lha {{[0-9]+}}, 68(1) -; CHECK: lbz {{[0-9]+}}, 55(1) -; CHECK: lwz {{[0-9]+}}, 76(1) -; CHECK: lwz {{[0-9]+}}, 80(1) -; CHECK: lwz {{[0-9]+}}, 88(1) -; CHECK: lwz {{[0-9]+}}, 96(1) +; CHECK-LABEL: callee1 +; CHECK-DAG: std 9, 96(1) +; CHECK-DAG: std 8, 88(1) +; CHECK-DAG: std 7, 80(1) +; CHECK-DAG: stw 6, 76(1) +; CHECK-DAG: stw 5, 68(1) +; CHECK-DAG: sth 4, 62(1) +; CHECK-DAG: stb 3, 55(1) +; CHECK-DAG: lha {{[0-9]+}}, 62(1) +; CHECK-DAG: lha {{[0-9]+}}, 68(1) +; CHECK-DAG: lbz {{[0-9]+}}, 55(1) +; CHECK-DAG: lwz {{[0-9]+}}, 76(1) +; CHECK-DAG: lwz {{[0-9]+}}, 80(1) +; CHECK-DAG: lwz {{[0-9]+}}, 88(1) +; CHECK-DAG: lwz {{[0-9]+}}, 96(1) } define i32 @caller2() nounwind { @@ -139,6 +141,7 @@ %call 
= call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) ret i32 %call +; CHECK-LABEL: caller2 ; CHECK: stb {{[0-9]+}}, 71(1) ; CHECK: sth {{[0-9]+}}, 69(1) ; CHECK: stb {{[0-9]+}}, 87(1) @@ -184,18 +187,19 @@ %add13 = add nsw i32 %add11, %6 ret i32 %add13 -; CHECK: std 9, 96(1) -; CHECK: std 8, 88(1) -; CHECK: std 7, 80(1) -; CHECK: stw 6, 76(1) -; CHECK: std 5, 64(1) -; CHECK: sth 4, 62(1) -; CHECK: stb 3, 55(1) -; CHECK: lha {{[0-9]+}}, 62(1) -; CHECK: lha {{[0-9]+}}, 69(1) -; CHECK: lbz {{[0-9]+}}, 55(1) -; CHECK: lwz {{[0-9]+}}, 76(1) -; CHECK: lwz {{[0-9]+}}, 83(1) -; CHECK: lwz {{[0-9]+}}, 90(1) -; CHECK: lwz {{[0-9]+}}, 97(1) +; CHECK-LABEL: callee2 +; CHECK-DAG: std 9, 96(1) +; CHECK-DAG: std 8, 88(1) +; CHECK-DAG: std 7, 80(1) +; CHECK-DAG: stw 6, 76(1) +; CHECK-DAG: std 5, 64(1) +; CHECK-DAG: sth 4, 62(1) +; CHECK-DAG: stb 3, 55(1) +; CHECK-DAG: lha {{[0-9]+}}, 62(1) +; CHECK-DAG: lha {{[0-9]+}}, 69(1) +; CHECK-DAG: lbz {{[0-9]+}}, 55(1) +; CHECK-DAG: lwz {{[0-9]+}}, 76(1) +; CHECK-DAG: lwz {{[0-9]+}}, 83(1) +; CHECK-DAG: lwz {{[0-9]+}}, 90(1) +; CHECK-DAG: lwz {{[0-9]+}}, 97(1) } Index: test/CodeGen/SystemZ/unaligned-01.ll =================================================================== --- test/CodeGen/SystemZ/unaligned-01.ll +++ test/CodeGen/SystemZ/unaligned-01.ll @@ -1,10 +1,7 @@ ; Check that unaligned accesses are allowed in general. We check the ; few exceptions (like CRL) in their respective test files. ; -; FIXME: -combiner-alias-analysis (the default for SystemZ) stops -; f1 from being optimized. -; RUN: llc < %s -mtriple=s390x-linux-gnu -combiner-alias-analysis=false \ -; RUN: | FileCheck %s +; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s ; Check that these four byte stores become a single word store. 
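+;
+; For illustration, the shape of input this refers to (a sketch; the actual
+; body of @f1 follows below and its value names may differ): four stores of
+; adjacent bytes such as
+;   store i8 0, i8* %ptr
+;   store i8 1, i8* %off1   ; %off1 = getelementptr i8, i8* %ptr, i64 1, etc.
+;   store i8 2, i8* %off2
+;   store i8 3, i8* %off3
+; which the DAG combiner should now merge into one unaligned 4-byte store.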
define void @f1(i8 *%ptr) { Index: test/CodeGen/Thumb/2010-07-15-debugOrdering.ll =================================================================== --- test/CodeGen/Thumb/2010-07-15-debugOrdering.ll +++ test/CodeGen/Thumb/2010-07-15-debugOrdering.ll @@ -9,9 +9,9 @@ define void @_Z19getClosestDiagonal3ii(%0* noalias sret, i32, i32) nounwind { ; CHECK: blx ___muldf3 -; CHECK: blx ___muldf3 ; CHECK: beq LBB0 ; CHECK: blx ___muldf3 +; CHECK: blx ___muldf3 ; <label>:3 switch i32 %1, label %4 [ i32 0, label %5 Index: test/CodeGen/Thumb/stack-access.ll =================================================================== --- test/CodeGen/Thumb/stack-access.ll +++ test/CodeGen/Thumb/stack-access.ll @@ -74,15 +74,17 @@ } ; Accessing the bottom of a large array shouldn't require materializing a base +; +; CHECK: movs [[REG:r[0-9]+]], #1 +; CHECK: str [[REG]], [sp, #16] +; CHECK: str [[REG]], [sp, #4] + define void @test7() { %arr = alloca [200 x i32], align 4 - ; CHECK: movs [[REG:r[0-9]+]], #1 - ; CHECK: str [[REG]], [sp, #4] %arrayidx = getelementptr inbounds [200 x i32], [200 x i32]* %arr, i32 0, i32 1 store i32 1, i32* %arrayidx, align 4 - ; CHECK: str [[REG]], [sp, #16] %arrayidx1 = getelementptr inbounds [200 x i32], [200 x i32]* %arr, i32 0, i32 4 store i32 1, i32* %arrayidx1, align 4 Index: test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll =================================================================== --- test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll +++ test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -combiner-alias-analysis -march=x86-64 -mcpu=core2 | FileCheck %s +; RUN: llc < %s -march=x86-64 -mcpu=core2 | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.4" Index: test/CodeGen/X86/2012-11-28-merge-store-alias.ll =================================================================== --- test/CodeGen/X86/2012-11-28-merge-store-alias.ll +++ test/CodeGen/X86/2012-11-28-merge-store-alias.ll @@ -3,8 +3,8 @@ ; CHECK: merge_stores_can ; CHECK: callq foo ; CHECK: xorps %xmm0, %xmm0 -; CHECK-NEXT: movl 36(%rsp), %ebp ; CHECK-NEXT: movups %xmm0 +; CHECK-NEXT: movl 36(%rsp), %ebp ; CHECK: callq foo ; CHECK: ret declare i32 @foo([10 x i32]* ) Index: test/CodeGen/X86/MergeConsecutiveStores.ll =================================================================== --- test/CodeGen/X86/MergeConsecutiveStores.ll +++ test/CodeGen/X86/MergeConsecutiveStores.ll @@ -291,16 +291,12 @@ ret void } -;; On x86, even unaligned copies should be merged to vector ops. -;; TODO: however, this cannot happen at the moment, due to brokenness -;; in MergeConsecutiveStores. See UseAA FIXME in DAGCombiner.cpp -;; visitSTORE. - +;; On x86, even unaligned copies can be merged to vector ops. 
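+;;
+;; A sketch of what "merged" means here (illustrative names; the test body
+;; below operates on %struct.B fields): a run of adjacent align-1 loads
+;;   %v0 = load i32, i32* %p0, align 1
+;;   %v1 = load i32, i32* %p1, align 1
+;;   ...
+;; and the matching adjacent align-1 stores can become one vmovups load and
+;; one vmovups store, which the CHECK lines below now expect.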
; CHECK-LABEL: merge_loads_no_align: ; load: -; CHECK-NOT: vmovups ;; TODO +; CHECK: vmovups ; store: -; CHECK-NOT: vmovups ;; TODO +; CHECK: vmovups ; CHECK: ret define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp { %a1 = icmp sgt i32 %count, 0 @@ -546,8 +542,8 @@ ; CHECK-LABEL: merge_vec_element_and_scalar_load ; CHECK: movq (%rdi), %rax +; CHECK-NEXT: movq 8(%rdi), %rcx ; CHECK-NEXT: movq %rax, 32(%rdi) -; CHECK-NEXT: movq 8(%rdi), %rax -; CHECK-NEXT: movq %rax, 40(%rdi) +; CHECK-NEXT: movq %rcx, 40(%rdi) ; CHECK-NEXT: retq } Index: test/CodeGen/X86/chain_order.ll =================================================================== --- test/CodeGen/X86/chain_order.ll +++ test/CodeGen/X86/chain_order.ll @@ -6,7 +6,7 @@ ; CHECK-NEXT: vmovhpd 24(%rdi), %xmm{{.*}} ; CHECK-NEXT: vmovhpd 8(%rdi), %xmm{{.*}} ; CHECK: vmovupd %xmm{{.*}}, (%rdi) -; CHECK-NEXT: vmovupd %xmm{{.*}}, 16(%rdi) +; CHECK: vmovupd %xmm{{.*}}, 16(%rdi) ; CHECK: ret ; A test from pifft (after SLP-vectorization) that fails when we drop the chain on newly merged loads. Index: test/CodeGen/X86/clear_upper_vector_element_bits.ll =================================================================== --- test/CodeGen/X86/clear_upper_vector_element_bits.ll +++ test/CodeGen/X86/clear_upper_vector_element_bits.ll @@ -159,47 +159,47 @@ ; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE-NEXT: movd %eax, %xmm0 -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi ; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE-NEXT: movd %eax, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movd %esi, %xmm0 ; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE-NEXT: movd %ecx, %xmm2 +; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE-NEXT: movd %edx, %xmm0 -; SSE-NEXT: movd %esi, %xmm1 -; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movd %edi, %xmm0 -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE-NEXT: movd %edx, %xmm3 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = 
xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE-NEXT: movd %r9d, %xmm0 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE-NEXT: movd %eax, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movd %r8d, %xmm0 -; SSE-NEXT: movd %ecx, %xmm2 -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] ; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero -; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero ; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm2 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm3 +; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm2 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm4 +; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
Index: test/CodeGen/X86/combiner-aa-0.ll
===================================================================
--- test/CodeGen/X86/combiner-aa-0.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 -combiner-global-alias-analysis -combiner-alias-analysis
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
- %struct.Hash_Key = type { [4 x i32], i32 }
-@g_flipV_hashkey = external global %struct.Hash_Key, align 16 ; <%struct.Hash_Key*> [#uses=1]
-
-define void @foo() nounwind {
- %t0 = load i32, i32* undef, align 16 ; <i32> [#uses=1]
- %t1 = load i32, i32* null, align 4 ; <i32> [#uses=1]
- %t2 = srem i32 %t0, 32 ; <i32> [#uses=1]
- %t3 = shl i32 1, %t2 ; <i32> [#uses=1]
- %t4 = xor i32 %t3, %t1 ; <i32> [#uses=1]
- store i32 %t4, i32* null, align 4
- %t5 = getelementptr %struct.Hash_Key, %struct.Hash_Key* @g_flipV_hashkey, i64 0, i32 0, i64 0 ; <i32*> [#uses=2]
- %t6 = load i32, i32* %t5, align 4 ; <i32> [#uses=1]
- %t7 = shl i32 1, undef ; <i32> [#uses=1]
- %t8 = xor i32 %t7, %t6 ; <i32> [#uses=1]
- store i32 %t8, i32* %t5, align 4
- unreachable
-}
Index: test/CodeGen/X86/combiner-aa-1.ll
===================================================================
--- test/CodeGen/X86/combiner-aa-1.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s --combiner-alias-analysis --combiner-global-alias-analysis
-; PR4880
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-
-%struct.alst_node = type { %struct.node }
-%struct.arg_node = type { %struct.node, i8*, %struct.alst_node* }
-%struct.arglst_node = type { %struct.alst_node, %struct.arg_node*, %struct.arglst_node* }
-%struct.lam_node = type { %struct.alst_node, %struct.arg_node*, %struct.alst_node* }
-%struct.node = type { i32 (...)**, %struct.node* }
-
-define i32 @._ZN8lam_node18resolve_name_clashEP8arg_nodeP9alst_node._ZNK8lam_nodeeqERK8exp_node._ZN11arglst_nodeD0Ev(%struct.lam_node* %this.this, %struct.arg_node* %outer_arg, %struct.alst_node* %env.cmp, %struct.arglst_node* %this, i32 %functionID) {
-comb_entry:
-  %.SV59 = alloca %struct.node* ; <%struct.node**> [#uses=1]
-  %0 = load i32 (...)**, i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1]
-  %1 = getelementptr inbounds i32 (...)*, i32 (...)** %0, i32 3 ; <i32 (...)**> [#uses=1]
-  %2 = load i32 (...)*, i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1]
-  store %struct.node* undef, %struct.node** %.SV59
-  %3 = bitcast i32 (...)* %2 to i32 (%struct.node*)* ; <i32 (%struct.node*)*> [#uses=1]
-  %4 = tail call i32 %3(%struct.node* undef) ; <i32> [#uses=0]
-  unreachable
-}
Index: test/CodeGen/X86/copy-eflags.ll
===================================================================
--- test/CodeGen/X86/copy-eflags.ll
+++ test/CodeGen/X86/copy-eflags.ll
@@ -9,19 +9,18 @@
 @.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1

 ; CHECK-LABEL: func:
-; This tests whether eax is properly saved/restored around the lahf/sahf
-; instruction sequences.
+; This tests whether eax is properly saved/restored around the lahf/sahf
+; instruction sequences. As a, b, c and d do not alias, we could now reorder
+; the memory operations to avoid the spills, so they are made volatile below.
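+; For example (illustrative; not literally lines of this test), without the
+; volatile markers the combiner would be free to move
+;   %cval = load i32, i32* @c
+; ahead of
+;   store i8 %inc, i8* @b
+; once it proves @b and @c cannot alias, and the flag spills this test
+; checks for could disappear.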
define i32 @func() { entry: %bval = load i8, i8* @b %inc = add i8 %bval, 1 - store i8 %inc, i8* @b - %cval = load i32, i32* @c + store volatile i8 %inc, i8* @b + %cval = load volatile i32, i32* @c %inc1 = add nsw i32 %cval, 1 - store i32 %inc1, i32* @c - %aval = load i8, i8* @a + store volatile i32 %inc1, i32* @c + %aval = load volatile i8, i8* @a %inc2 = add i8 %aval, 1 - store i8 %inc2, i8* @a + store volatile i8 %inc2, i8* @a ; Copy flags produced by the incb of %inc1 to a register, need to save+restore ; eax around it. The flags will be reused by %tobool. ; CHECK: pushl %eax Index: test/CodeGen/X86/dag-merge-fast-accesses.ll =================================================================== --- test/CodeGen/X86/dag-merge-fast-accesses.ll +++ test/CodeGen/X86/dag-merge-fast-accesses.ll @@ -51,19 +51,11 @@ } -;; TODO: FAST *should* be: -;; movups (%rdi), %xmm0 -;; movups %xmm0, 40(%rdi) -;; ..but is not currently. See the UseAA FIXME in DAGCombiner.cpp -;; visitSTORE. - define void @merge_vec_load_and_stores(i64 *%ptr) { ; FAST-LABEL: merge_vec_load_and_stores: ; FAST: # BB#0: -; FAST-NEXT: movq (%rdi), %rax -; FAST-NEXT: movq 8(%rdi), %rcx -; FAST-NEXT: movq %rax, 40(%rdi) -; FAST-NEXT: movq %rcx, 48(%rdi) +; FAST-NEXT: movups (%rdi), %xmm0 +; FAST-NEXT: movups %xmm0, 40(%rdi) ; FAST-NEXT: retq ; ; SLOW-LABEL: merge_vec_load_and_stores: Index: test/CodeGen/X86/dont-trunc-store-double-to-float.ll =================================================================== --- test/CodeGen/X86/dont-trunc-store-double-to-float.ll +++ test/CodeGen/X86/dont-trunc-store-double-to-float.ll @@ -1,9 +1,9 @@ ; RUN: llc -march=x86 < %s | FileCheck %s ; CHECK-LABEL: @bar -; CHECK: movl $1074339512, -; CHECK: movl $1374389535, -; CHECK: movl $1078523331, +; CHECK-DAG: movl $1074339512, +; CHECK-DAG: movl $1374389535, +; CHECK-DAG: movl $1078523331, define void @bar() unnamed_addr { entry-block: %a = alloca double Index: test/CodeGen/X86/extractelement-legalization-store-ordering.ll =================================================================== --- test/CodeGen/X86/extractelement-legalization-store-ordering.ll +++ test/CodeGen/X86/extractelement-legalization-store-ordering.ll @@ -18,13 +18,13 @@ ; CHECK-NEXT: movdqa %xmm0, (%edx) ; CHECK-NEXT: shll $4, %ecx ; CHECK-NEXT: movl (%ecx,%edx), %esi -; CHECK-NEXT: movl 12(%ecx,%edx), %edi +; CHECK-NEXT: movl 4(%ecx,%edx), %edi ; CHECK-NEXT: movl 8(%ecx,%edx), %ebx -; CHECK-NEXT: movl 4(%ecx,%edx), %edx +; CHECK-NEXT: movl 12(%ecx,%edx), %edx ; CHECK-NEXT: movl %esi, 12(%eax,%ecx) -; CHECK-NEXT: movl %edx, (%eax,%ecx) +; CHECK-NEXT: movl %edi, (%eax,%ecx) ; CHECK-NEXT: movl %ebx, 8(%eax,%ecx) -; CHECK-NEXT: movl %edi, 4(%eax,%ecx) +; CHECK-NEXT: movl %edx, 4(%eax,%ecx) ; CHECK-NEXT: popl %esi ; CHECK-NEXT: popl %edi ; CHECK-NEXT: popl %ebx Index: test/CodeGen/X86/i256-add.ll =================================================================== --- test/CodeGen/X86/i256-add.ll +++ test/CodeGen/X86/i256-add.ll @@ -3,16 +3,16 @@ ; RUN: grep sbbl %t | count 7 define void @add(i256* %p, i256* %q) nounwind { - %a = load i256, i256* %p - %b = load i256, i256* %q + %a = load volatile i256, i256* %p + %b = load volatile i256, i256* %q %c = add i256 %a, %b - store i256 %c, i256* %p + store volatile i256 %c, i256* %p ret void } define void @sub(i256* %p, i256* %q) nounwind { - %a = load i256, i256* %p - %b = load i256, i256* %q + %a = load volatile i256, i256* %p + %b = load volatile i256, i256* %q %c = sub i256 %a, %b - store i256 %c, i256* %p + store volatile i256 %c, 
i256* %p ret void } Index: test/CodeGen/X86/i386-shrink-wrapping.ll =================================================================== --- test/CodeGen/X86/i386-shrink-wrapping.ll +++ test/CodeGen/X86/i386-shrink-wrapping.ll @@ -73,16 +73,16 @@ ; CHECK-NEXT: retl define i32 @eflagsLiveInPrologue() #0 { entry: - %tmp = load i32, i32* @a, align 4 + %tmp = load volatile i32, i32* @a, align 4 %tobool = icmp eq i32 %tmp, 0 br i1 %tobool, label %for.cond.preheader, label %if.then if.then: ; preds = %entry - store i1 true, i1* @d, align 1 + store volatile i1 true, i1* @d, align 1 br label %for.cond.preheader for.cond.preheader: ; preds = %if.then, %entry - %tmp1 = load i32, i32* @b, align 4 + %tmp1 = load volatile i32, i32* @b, align 4 %tobool14 = icmp eq i32 %tmp1, 0 br i1 %tobool14, label %for.end, label %for.body.preheader @@ -93,14 +93,14 @@ br label %for.body for.end: ; preds = %for.cond.preheader - %.b3 = load i1, i1* @d, align 1 + %.b3 = load volatile i1, i1* @d, align 1 %tmp2 = select i1 %.b3, i8 0, i8 6 - store i8 %tmp2, i8* @e, align 1 - %tmp3 = load i8, i8* @f, align 1 + store volatile i8 %tmp2, i8* @e, align 1 + %tmp3 = load volatile i8, i8* @f, align 1 %conv = sext i8 %tmp3 to i32 %add = add nsw i32 %conv, 1 %rem = srem i32 %tmp1, %add - store i32 %rem, i32* @c, align 4 + store volatile i32 %rem, i32* @c, align 4 %conv2 = select i1 %.b3, i32 0, i32 6 %call = tail call i32 (i8*, ...) @varfunc(i8* nonnull getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %conv2) #1 ret i32 0 Index: test/CodeGen/X86/merge-consecutive-loads-128.ll =================================================================== --- test/CodeGen/X86/merge-consecutive-loads-128.ll +++ test/CodeGen/X86/merge-consecutive-loads-128.ll @@ -695,12 +695,12 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable noinline ssp { ; SSE2-LABEL: merge_4f32_f32_2345_volatile: ; SSE2: # BB#0: -; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero ; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: retq ; Index: test/CodeGen/X86/merge-consecutive-loads-256.ll =================================================================== --- test/CodeGen/X86/merge-consecutive-loads-256.ll +++ test/CodeGen/X86/merge-consecutive-loads-256.ll @@ -694,10 +694,10 @@ ; AVX1: # BB#0: ; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1 -; AVX1-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; @@ -705,10 +705,10 @@ ; AVX2: # BB#0: ; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1 -; AVX2-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX2-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0 ; AVX2-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0 ; AVX2-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0 +; 
AVX2-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; @@ -716,10 +716,10 @@ ; AVX512F: # BB#0: ; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX512F-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1 -; AVX512F-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX512F-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0 ; AVX512F-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0 ; AVX512F-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0 +; AVX512F-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX512F-NEXT: retq ; @@ -728,10 +728,10 @@ ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; X32-AVX-NEXT: vpinsrw $0, (%eax), %xmm0, %xmm1 -; X32-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1 ; X32-AVX-NEXT: vpinsrw $4, 24(%eax), %xmm0, %xmm0 ; X32-AVX-NEXT: vpinsrw $6, 28(%eax), %xmm0, %xmm0 ; X32-AVX-NEXT: vpinsrw $7, 30(%eax), %xmm0, %xmm0 +; X32-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1 ; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X32-AVX-NEXT: retl %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0 Index: test/CodeGen/X86/merge-store-partially-alias-loads.ll =================================================================== --- test/CodeGen/X86/merge-store-partially-alias-loads.ll +++ test/CodeGen/X86/merge-store-partially-alias-loads.ll @@ -21,11 +21,11 @@ ; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<LD2[%tmp81](align=1)> [[ENTRYTOKEN]], [[BASEPTR]], undef:i64 ; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load<LD1[%tmp12]> [[ENTRYTOKEN]], [[ADDPTR]], undef:i64 -; DBGDAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1 +; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<ST1[%tmp14]> [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64 +; DBGDAG-DAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1 +; DBGDAG: [[ST2:t[0-9]+]]: ch = store<ST2[%tmp10](align=1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64 -; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<ST2[%tmp10](align=1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64 -; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<ST1[%tmp14]> [[ST2]], [[LD1]], t{{[0-9]+}}, undef:i64 -; DBGDAG: X86ISD::RET_FLAG [[ST1]], +; DBGDAG: X86ISD::RET_FLAG t{{[0-9]+}}, ; DBGDAG: Type-legalized selection DAG: BB#0 'merge_store_partial_overlap_load:' define void @merge_store_partial_overlap_load([4 x i8]* %tmp) { Index: test/CodeGen/X86/pr18023.ll =================================================================== --- test/CodeGen/X86/pr18023.ll +++ /dev/null @@ -1,31 +0,0 @@ -; RUN: llc < %s -mtriple x86_64-apple-macosx10.9.0 | FileCheck %s -; PR18023 - -; CHECK: movabsq $4294967296, %rcx -; CHECK: movq %rcx, (%rax) -; CHECK: movl $1, 4(%rax) -; CHECK: movl $0, 4(%rax) -; CHECK: movq $1, 4(%rax) - -@c = common global i32 0, align 4 -@a = common global [3 x i32] zeroinitializer, align 4 -@b = common global i32 0, align 4 -@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1 - -define void @func() { - store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - store i32 0, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 0), align 4 - %1 = load volatile i32, i32* @b, align 4 - store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - store i32 0, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - %2 = load volatile i32, i32* @b, align 4 - store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - store i32 0, i32* 
getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 2), align 4 - %3 = load volatile i32, i32* @b, align 4 - store i32 3, i32* @c, align 4 - %4 = load i32, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4) - ret void -} - -declare i32 @printf(i8*, ...) Index: test/CodeGen/X86/stores-merging.ll =================================================================== --- test/CodeGen/X86/stores-merging.ll +++ test/CodeGen/X86/stores-merging.ll @@ -13,9 +13,9 @@ ;; the same result in memory in the end. ; CHECK-LABEL: redundant_stores_merging: -; CHECK: movl $123, e+8(%rip) -; CHECK: movabsq $1958505086977, %rax +; CHECK: movabsq $528280977409, %rax ; CHECK: movq %rax, e+4(%rip) +; CHECK: movl $456, e+8(%rip) define void @redundant_stores_merging() { entry: store i32 1, i32* getelementptr inbounds (%structTy, %structTy* @e, i64 0, i32 1), align 4 @@ -26,9 +26,9 @@ ;; This variant tests PR25154. ; CHECK-LABEL: redundant_stores_merging_reverse: -; CHECK: movl $123, e+8(%rip) -; CHECK: movabsq $1958505086977, %rax +; CHECK: movabsq $528280977409, %rax ; CHECK: movq %rax, e+4(%rip) +; CHECK: movl $456, e+8(%rip) define void @redundant_stores_merging_reverse() { entry: store i32 123, i32* getelementptr inbounds (%structTy, %structTy* @e, i64 0, i32 2), align 4 @@ -45,9 +45,8 @@ ;; a movl, after the store to 3). ;; CHECK-LABEL: overlapping_stores_merging: -;; CHECK: movw $0, b+2(%rip) +;; CHECK: movl $1, b(%rip) ;; CHECK: movw $2, b+3(%rip) -;; CHECK: movw $1, b(%rip) define void @overlapping_stores_merging() { entry: store i16 0, i16* bitcast (i8* getelementptr inbounds ([8 x i8], [8 x i8]* @b, i64 0, i64 2) to i16*), align 2 Index: test/CodeGen/X86/vector-compare-results.ll =================================================================== --- test/CodeGen/X86/vector-compare-results.ll +++ test/CodeGen/X86/vector-compare-results.ll @@ -1,5 +1,4 @@ ; NOTE: Assertions have been autogenerated by update_llc_test_checks.py -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 @@ -396,98 +395,98 @@ ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb 
-{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -970,98 +969,98 @@ ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; 
SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -1240,196 +1239,196 @@ ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), 
%al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 6(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 
2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 4(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb 
$1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -2352,98 +2351,98 @@ ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; 
SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -3246,98 +3245,98 @@ ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: 
movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -3790,196 +3789,196 @@ ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: 
movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 6(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 4(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al 
; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -4970,392 +4969,392 @@ ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 14(%rdi) -; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) -; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) -; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) -; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb 
-{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 14(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; 
SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 12(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 10(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb 
-{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 8(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 6(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movdqa %xmm2, 
-{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 4(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; 
SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax Index: test/CodeGen/X86/vector-lzcnt-128.ll =================================================================== --- test/CodeGen/X86/vector-lzcnt-128.ll +++ test/CodeGen/X86/vector-lzcnt-128.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2 ; RUN: llc < %s 
-mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2 @@ -30,42 +30,6 @@ ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; -; SSE3-LABEL: testv2i64: -; SSE3: # BB#0: -; SSE3-NEXT: movd %xmm0, %rax -; SSE3-NEXT: bsrq %rax, %rax -; SSE3-NEXT: movl $127, %ecx -; SSE3-NEXT: cmoveq %rcx, %rax -; SSE3-NEXT: xorq $63, %rax -; SSE3-NEXT: movd %rax, %xmm1 -; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE3-NEXT: movd %xmm0, %rax -; SSE3-NEXT: bsrq %rax, %rax -; SSE3-NEXT: cmoveq %rcx, %rax -; SSE3-NEXT: xorq $63, %rax -; SSE3-NEXT: movd %rax, %xmm0 -; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE3-NEXT: retq -; -; SSSE3-LABEL: testv2i64: -; SSSE3: # BB#0: -; SSSE3-NEXT: movd %xmm0, %rax -; SSSE3-NEXT: bsrq %rax, %rax -; SSSE3-NEXT: movl $127, %ecx -; SSSE3-NEXT: cmoveq %rcx, %rax -; SSSE3-NEXT: xorq $63, %rax -; SSSE3-NEXT: movd %rax, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSSE3-NEXT: movd %xmm0, %rax -; SSSE3-NEXT: bsrq %rax, %rax -; SSSE3-NEXT: cmoveq %rcx, %rax -; SSSE3-NEXT: xorq $63, %rax -; SSSE3-NEXT: movd %rax, %xmm0 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSSE3-NEXT: retq -; ; SSE41-LABEL: testv2i64: ; SSE41: # BB#0: ; SSE41-NEXT: pextrq $1, %xmm0, %rax @@ -158,36 +122,6 @@ ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; -; SSE3-LABEL: testv2i64u: -; SSE3: # BB#0: -; SSE3-NEXT: movd %xmm0, %rax -; SSE3-NEXT: bsrq %rax, %rax -; SSE3-NEXT: xorq $63, %rax -; SSE3-NEXT: movd %rax, %xmm1 -; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE3-NEXT: movd %xmm0, %rax -; SSE3-NEXT: bsrq %rax, %rax -; SSE3-NEXT: xorq $63, %rax -; SSE3-NEXT: movd %rax, %xmm0 -; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE3-NEXT: retq -; -; SSSE3-LABEL: testv2i64u: -; SSSE3: # BB#0: -; SSSE3-NEXT: movd %xmm0, %rax -; SSSE3-NEXT: bsrq %rax, %rax -; SSSE3-NEXT: xorq $63, %rax -; SSSE3-NEXT: movd %rax, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSSE3-NEXT: movd %xmm0, %rax -; SSSE3-NEXT: bsrq %rax, %rax -; SSSE3-NEXT: xorq $63, %rax -; SSSE3-NEXT: movd %rax, %xmm0 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSSE3-NEXT: retq -; ; SSE41-LABEL: testv2i64u: ; SSE41: # BB#0: ; SSE41-NEXT: pextrq $1, %xmm0, %rax @@ -286,70 +220,6 @@ ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; -; SSE3-LABEL: testv4i32: -; SSE3: # BB#0: -; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; SSE3-NEXT: movd %xmm1, %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: movl $63, %ecx -; SSE3-NEXT: cmovel %ecx, %eax -; SSE3-NEXT: xorl $31, %eax -; SSE3-NEXT: movd %eax, %xmm1 -; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] -; SSE3-NEXT: movd %xmm2, %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: cmovel %ecx, %eax -; SSE3-NEXT: xorl $31, %eax -; SSE3-NEXT: movd %eax, %xmm2 -; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE3-NEXT: movd %xmm0, %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: cmovel %ecx, %eax -; SSE3-NEXT: xorl $31, %eax -; SSE3-NEXT: movd %eax, %xmm1 -; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; 
SSE3-NEXT: movd %xmm0, %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: cmovel %ecx, %eax -; SSE3-NEXT: xorl $31, %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE3-NEXT: retq -; -; SSSE3-LABEL: testv4i32: -; SSSE3: # BB#0: -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; SSSE3-NEXT: movd %xmm1, %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: movl $63, %ecx -; SSSE3-NEXT: cmovel %ecx, %eax -; SSSE3-NEXT: xorl $31, %eax -; SSSE3-NEXT: movd %eax, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] -; SSSE3-NEXT: movd %xmm2, %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: cmovel %ecx, %eax -; SSSE3-NEXT: xorl $31, %eax -; SSSE3-NEXT: movd %eax, %xmm2 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: cmovel %ecx, %eax -; SSSE3-NEXT: xorl $31, %eax -; SSSE3-NEXT: movd %eax, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: cmovel %ecx, %eax -; SSSE3-NEXT: xorl $31, %eax -; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSSE3-NEXT: retq -; ; SSE41-LABEL: testv4i32: ; SSE41: # BB#0: ; SSE41-NEXT: pextrd $1, %xmm0, %eax @@ -469,60 +339,6 @@ ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; -; SSE3-LABEL: testv4i32u: -; SSE3: # BB#0: -; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; SSE3-NEXT: movd %xmm1, %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: xorl $31, %eax -; SSE3-NEXT: movd %eax, %xmm1 -; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] -; SSE3-NEXT: movd %xmm2, %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: xorl $31, %eax -; SSE3-NEXT: movd %eax, %xmm2 -; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE3-NEXT: movd %xmm0, %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: xorl $31, %eax -; SSE3-NEXT: movd %eax, %xmm1 -; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE3-NEXT: movd %xmm0, %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: xorl $31, %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE3-NEXT: retq -; -; SSSE3-LABEL: testv4i32u: -; SSSE3: # BB#0: -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; SSSE3-NEXT: movd %xmm1, %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: xorl $31, %eax -; SSSE3-NEXT: movd %eax, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] -; SSSE3-NEXT: movd %xmm2, %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: xorl $31, %eax -; SSSE3-NEXT: movd %eax, %xmm2 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: xorl $31, %eax -; SSSE3-NEXT: movd %eax, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: xorl $31, %eax -; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSSE3-NEXT: movdqa %xmm1, %xmm0 
-; SSSE3-NEXT: retq -; ; SSE41-LABEL: testv4i32u: ; SSE41: # BB#0: ; SSE41-NEXT: pextrd $1, %xmm0, %eax @@ -652,110 +468,6 @@ ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; SSE2-NEXT: retq ; -; SSE3-LABEL: testv8i16: -; SSE3: # BB#0: -; SSE3-NEXT: pextrw $7, %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %cx -; SSE3-NEXT: movw $31, %ax -; SSE3-NEXT: cmovew %ax, %cx -; SSE3-NEXT: xorl $15, %ecx -; SSE3-NEXT: movd %ecx, %xmm1 -; SSE3-NEXT: pextrw $3, %xmm0, %ecx -; SSE3-NEXT: bsrw %cx, %cx -; SSE3-NEXT: cmovew %ax, %cx -; SSE3-NEXT: xorl $15, %ecx -; SSE3-NEXT: movd %ecx, %xmm2 -; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE3-NEXT: pextrw $5, %xmm0, %ecx -; SSE3-NEXT: bsrw %cx, %cx -; SSE3-NEXT: cmovew %ax, %cx -; SSE3-NEXT: xorl $15, %ecx -; SSE3-NEXT: movd %ecx, %xmm3 -; SSE3-NEXT: pextrw $1, %xmm0, %ecx -; SSE3-NEXT: bsrw %cx, %cx -; SSE3-NEXT: cmovew %ax, %cx -; SSE3-NEXT: xorl $15, %ecx -; SSE3-NEXT: movd %ecx, %xmm1 -; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSE3-NEXT: pextrw $6, %xmm0, %ecx -; SSE3-NEXT: bsrw %cx, %cx -; SSE3-NEXT: cmovew %ax, %cx -; SSE3-NEXT: xorl $15, %ecx -; SSE3-NEXT: movd %ecx, %xmm2 -; SSE3-NEXT: pextrw $2, %xmm0, %ecx -; SSE3-NEXT: bsrw %cx, %cx -; SSE3-NEXT: cmovew %ax, %cx -; SSE3-NEXT: xorl $15, %ecx -; SSE3-NEXT: movd %ecx, %xmm3 -; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE3-NEXT: pextrw $4, %xmm0, %ecx -; SSE3-NEXT: bsrw %cx, %cx -; SSE3-NEXT: cmovew %ax, %cx -; SSE3-NEXT: xorl $15, %ecx -; SSE3-NEXT: movd %ecx, %xmm2 -; SSE3-NEXT: movd %xmm0, %ecx -; SSE3-NEXT: bsrw %cx, %cx -; SSE3-NEXT: cmovew %ax, %cx -; SSE3-NEXT: xorl $15, %ecx -; SSE3-NEXT: movd %ecx, %xmm0 -; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE3-NEXT: retq -; -; SSSE3-LABEL: testv8i16: -; SSSE3: # BB#0: -; SSSE3-NEXT: pextrw $7, %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %cx -; SSSE3-NEXT: movw $31, %ax -; SSSE3-NEXT: cmovew %ax, %cx -; SSSE3-NEXT: xorl $15, %ecx -; SSSE3-NEXT: movd %ecx, %xmm1 -; SSSE3-NEXT: pextrw $3, %xmm0, %ecx -; SSSE3-NEXT: bsrw %cx, %cx -; SSSE3-NEXT: cmovew %ax, %cx -; SSSE3-NEXT: xorl $15, %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSSE3-NEXT: pextrw $5, %xmm0, %ecx -; SSSE3-NEXT: bsrw %cx, %cx -; SSSE3-NEXT: cmovew %ax, %cx -; SSSE3-NEXT: xorl $15, %ecx -; SSSE3-NEXT: movd %ecx, %xmm3 -; SSSE3-NEXT: pextrw $1, %xmm0, %ecx -; SSSE3-NEXT: bsrw %cx, %cx -; SSSE3-NEXT: cmovew %ax, %cx -; SSSE3-NEXT: xorl $15, %ecx -; SSSE3-NEXT: movd %ecx, %xmm1 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSSE3-NEXT: pextrw $6, %xmm0, %ecx -; SSSE3-NEXT: bsrw %cx, %cx -; SSSE3-NEXT: cmovew %ax, %cx -; SSSE3-NEXT: xorl $15, %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: pextrw $2, %xmm0, %ecx -; SSSE3-NEXT: bsrw 
%cx, %cx -; SSSE3-NEXT: cmovew %ax, %cx -; SSSE3-NEXT: xorl $15, %ecx -; SSSE3-NEXT: movd %ecx, %xmm3 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSSE3-NEXT: pextrw $4, %xmm0, %ecx -; SSSE3-NEXT: bsrw %cx, %cx -; SSSE3-NEXT: cmovew %ax, %cx -; SSSE3-NEXT: xorl $15, %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: movd %xmm0, %ecx -; SSSE3-NEXT: bsrw %cx, %cx -; SSSE3-NEXT: cmovew %ax, %cx -; SSSE3-NEXT: xorl $15, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSSE3-NEXT: retq -; ; SSE41-LABEL: testv8i16: ; SSE41: # BB#0: ; SSE41-NEXT: pextrw $1, %xmm0, %eax @@ -956,92 +668,6 @@ ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; SSE2-NEXT: retq ; -; SSE3-LABEL: testv8i16u: -; SSE3: # BB#0: -; SSE3-NEXT: pextrw $7, %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %ax -; SSE3-NEXT: xorl $15, %eax -; SSE3-NEXT: movd %eax, %xmm1 -; SSE3-NEXT: pextrw $3, %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %ax -; SSE3-NEXT: xorl $15, %eax -; SSE3-NEXT: movd %eax, %xmm2 -; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE3-NEXT: pextrw $5, %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %ax -; SSE3-NEXT: xorl $15, %eax -; SSE3-NEXT: movd %eax, %xmm3 -; SSE3-NEXT: pextrw $1, %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %ax -; SSE3-NEXT: xorl $15, %eax -; SSE3-NEXT: movd %eax, %xmm1 -; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSE3-NEXT: pextrw $6, %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %ax -; SSE3-NEXT: xorl $15, %eax -; SSE3-NEXT: movd %eax, %xmm2 -; SSE3-NEXT: pextrw $2, %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %ax -; SSE3-NEXT: xorl $15, %eax -; SSE3-NEXT: movd %eax, %xmm3 -; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE3-NEXT: pextrw $4, %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %ax -; SSE3-NEXT: xorl $15, %eax -; SSE3-NEXT: movd %eax, %xmm2 -; SSE3-NEXT: movd %xmm0, %eax -; SSE3-NEXT: bsrw %ax, %ax -; SSE3-NEXT: xorl $15, %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE3-NEXT: retq -; -; SSSE3-LABEL: testv8i16u: -; SSSE3: # BB#0: -; SSSE3-NEXT: pextrw $7, %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %ax -; SSSE3-NEXT: xorl $15, %eax -; SSSE3-NEXT: movd %eax, %xmm1 -; SSSE3-NEXT: pextrw $3, %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %ax -; SSSE3-NEXT: xorl $15, %eax -; SSSE3-NEXT: movd %eax, %xmm2 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSSE3-NEXT: pextrw $5, %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %ax -; SSSE3-NEXT: xorl $15, %eax -; SSSE3-NEXT: movd %eax, %xmm3 -; SSSE3-NEXT: pextrw $1, %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %ax -; SSSE3-NEXT: xorl $15, %eax -; SSSE3-NEXT: movd %eax, %xmm1 -; SSSE3-NEXT: 
punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] -; SSSE3-NEXT: pextrw $6, %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %ax -; SSSE3-NEXT: xorl $15, %eax -; SSSE3-NEXT: movd %eax, %xmm2 -; SSSE3-NEXT: pextrw $2, %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %ax -; SSSE3-NEXT: xorl $15, %eax -; SSSE3-NEXT: movd %eax, %xmm3 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSSE3-NEXT: pextrw $4, %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %ax -; SSSE3-NEXT: xorl $15, %eax -; SSSE3-NEXT: movd %eax, %xmm2 -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: bsrw %ax, %ax -; SSSE3-NEXT: xorl $15, %eax -; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSSE3-NEXT: retq -; ; SSE41-LABEL: testv8i16u: ; SSE41: # BB#0: ; SSE41-NEXT: pextrw $1, %xmm0, %eax @@ -1174,8 +800,6 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind { ; SSE2-LABEL: testv16i8: ; SSE2: # BB#0: -; SSE2-NEXT: pushq %rbp -; SSE2-NEXT: pushq %rbx ; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: bsrl %eax, %ecx @@ -1183,47 +807,40 @@ ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx ; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm1 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: bsrl %edx, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm2 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp -; SSE2-NEXT: bsrl %ebp, %ebp -; SSE2-NEXT: cmovel %eax, %ebp -; SSE2-NEXT: xorl $7, %ebp -; SSE2-NEXT: movd %ebp, %xmm0 +; SSE2-NEXT: bsrl %ecx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: bsrl %edi, %edi -; SSE2-NEXT: cmovel %eax, %edi -; SSE2-NEXT: xorl $7, %edi -; SSE2-NEXT: movd %edi, %xmm1 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx +; SSE2-NEXT: cmovel %eax, %ecx +; SSE2-NEXT: xorl $7, %ecx +; SSE2-NEXT: movd %ecx, %xmm1 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx ; 
SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm2 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: bsrl %esi, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm3 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx ; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx @@ -1232,35 +849,42 @@ ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: bsrl %ebx, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: bsrl %edx, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: bsrl %r11d, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: bsrl %esi, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm2 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE2-NEXT: bsrl %r9d, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: bsrl %r10d, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: bsrl %r8d, %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: bsrl %ecx, %ecx ; SSE2-NEXT: cmovel %eax, %ecx ; SSE2-NEXT: xorl $7, %ecx ; SSE2-NEXT: movd %ecx, %xmm4 @@ -1273,220 +897,8 @@ ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] ; SSE2-NEXT: punpcklbw 
{{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: popq %rbx -; SSE2-NEXT: popq %rbp ; SSE2-NEXT: retq ; -; SSE3-LABEL: testv16i8: -; SSE3: # BB#0: -; SSE3-NEXT: pushq %rbp -; SSE3-NEXT: pushq %rbx -; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE3-NEXT: bsrl %eax, %ecx -; SSE3-NEXT: movl $15, %eax -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm0 -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE3-NEXT: bsrl %ecx, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm1 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE3-NEXT: bsrl %edx, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm2 -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp -; SSE3-NEXT: bsrl %ebp, %ebp -; SSE3-NEXT: cmovel %eax, %ebp -; SSE3-NEXT: xorl $7, %ebp -; SSE3-NEXT: movd %ebp, %xmm0 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE3-NEXT: bsrl %edi, %edi -; SSE3-NEXT: cmovel %eax, %edi -; SSE3-NEXT: xorl $7, %edi -; SSE3-NEXT: movd %edi, %xmm1 -; SSE3-NEXT: bsrl %ecx, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm2 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE3-NEXT: bsrl %esi, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm3 -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE3-NEXT: bsrl %ecx, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm1 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE3-NEXT: bsrl %ebx, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm0 -; SSE3-NEXT: bsrl %edx, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm3 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = 
xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE3-NEXT: bsrl %r11d, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm0 -; SSE3-NEXT: bsrl %esi, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm2 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE3-NEXT: bsrl %r9d, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm0 -; SSE3-NEXT: bsrl %r10d, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm3 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE3-NEXT: bsrl %r8d, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm4 -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE3-NEXT: bsrl %ecx, %ecx -; SSE3-NEXT: cmovel %eax, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm0 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE3-NEXT: popq %rbx -; SSE3-NEXT: popq %rbp -; SSE3-NEXT: retq -; -; SSSE3-LABEL: testv16i8: -; SSSE3: # BB#0: -; SSSE3-NEXT: pushq %rbp -; SSSE3-NEXT: pushq %rbx -; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: bsrl %eax, %ecx -; SSSE3-NEXT: movl $15, %eax -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: bsrl %ecx, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSSE3-NEXT: bsrl %edx, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp -; SSSE3-NEXT: bsrl %ebp, %ebp -; SSSE3-NEXT: cmovel %eax, %ebp -; SSSE3-NEXT: xorl $7, 
%ebp -; SSSE3-NEXT: movd %ebp, %xmm0 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSSE3-NEXT: bsrl %edi, %edi -; SSSE3-NEXT: cmovel %eax, %edi -; SSSE3-NEXT: xorl $7, %edi -; SSSE3-NEXT: movd %edi, %xmm1 -; SSSE3-NEXT: bsrl %ecx, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSSE3-NEXT: bsrl %esi, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm3 -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: bsrl %ecx, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSSE3-NEXT: bsrl %ebx, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: bsrl %edx, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm3 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSSE3-NEXT: bsrl %r11d, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: bsrl %esi, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSSE3-NEXT: bsrl %r9d, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: bsrl %r10d, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm3 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSSE3-NEXT: bsrl %r8d, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm4 -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: bsrl %ecx, %ecx -; SSSE3-NEXT: cmovel %eax, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSSE3-NEXT: 
punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSSE3-NEXT: popq %rbx -; SSSE3-NEXT: popq %rbp -; SSSE3-NEXT: retq -; ; SSE41-LABEL: testv16i8: ; SSE41: # BB#0: ; SSE41-NEXT: pextrb $1, %xmm0, %eax @@ -1758,77 +1170,76 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind { ; SSE2-LABEL: testv16i8u: ; SSE2: # BB#0: -; SSE2-NEXT: pushq %rbx ; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: bsrl %eax, %eax ; SSE2-NEXT: xorl $7, %eax ; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE2-NEXT: bsrl %esi, %esi -; SSE2-NEXT: xorl $7, %esi -; SSE2-NEXT: movd %esi, %xmm1 +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: bsrl %eax, %eax ; SSE2-NEXT: xorl $7, %eax ; SSE2-NEXT: movd %eax, %xmm0 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSE2-NEXT: bsrl %ebx, %ebx -; SSE2-NEXT: xorl $7, %ebx -; SSE2-NEXT: movd %ebx, %xmm2 +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm2 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: bsrl %edx, %edx -; SSE2-NEXT: xorl $7, %edx -; SSE2-NEXT: movd %edx, %xmm0 -; SSE2-NEXT: bsrl %esi, %edx -; SSE2-NEXT: xorl $7, %edx -; SSE2-NEXT: movd %edx, %xmm3 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: bsrl %ecx, %ecx -; SSE2-NEXT: xorl $7, %ecx -; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: bsrl %edx, %edx -; SSE2-NEXT: xorl $7, %edx -; SSE2-NEXT: movd %edx, %xmm1 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax 
+; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-NEXT: bsrl %edi, %edx -; SSE2-NEXT: xorl $7, %edx -; SSE2-NEXT: movd %edx, %xmm0 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax +; SSE2-NEXT: xorl $7, %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: bsrl %eax, %eax ; SSE2-NEXT: xorl $7, %eax ; SSE2-NEXT: movd %eax, %xmm2 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: bsrl %r10d, %eax +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax ; SSE2-NEXT: xorl $7, %eax ; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: bsrl %ecx, %eax +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax ; SSE2-NEXT: xorl $7, %eax ; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-NEXT: bsrl %r9d, %eax +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax ; SSE2-NEXT: xorl $7, %eax ; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: bsrl %r11d, %eax +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax ; SSE2-NEXT: xorl $7, %eax ; SSE2-NEXT: movd %eax, %xmm2 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: bsrl %r8d, %eax +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: bsrl %eax, %eax ; SSE2-NEXT: xorl $7, %eax ; SSE2-NEXT: movd %eax, %xmm4 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax @@ -1839,181 +1250,8 @@ ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: popq %rbx ; SSE2-NEXT: retq ; -; SSE3-LABEL: testv16i8u: -; SSE3: # BB#0: -; SSE3-NEXT: pushq %rbx -; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), 
%ecx -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE3-NEXT: bsrl %esi, %esi -; SSE3-NEXT: xorl $7, %esi -; SSE3-NEXT: movd %esi, %xmm1 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSE3-NEXT: bsrl %ebx, %ebx -; SSE3-NEXT: xorl $7, %ebx -; SSE3-NEXT: movd %ebx, %xmm2 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE3-NEXT: bsrl %edx, %edx -; SSE3-NEXT: xorl $7, %edx -; SSE3-NEXT: movd %edx, %xmm0 -; SSE3-NEXT: bsrl %esi, %edx -; SSE3-NEXT: xorl $7, %edx -; SSE3-NEXT: movd %edx, %xmm3 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE3-NEXT: bsrl %ecx, %ecx -; SSE3-NEXT: xorl $7, %ecx -; SSE3-NEXT: movd %ecx, %xmm0 -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE3-NEXT: bsrl %edx, %edx -; SSE3-NEXT: xorl $7, %edx -; SSE3-NEXT: movd %edx, %xmm1 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE3-NEXT: bsrl %edi, %edx -; SSE3-NEXT: xorl $7, %edx -; SSE3-NEXT: movd %edx, %xmm0 -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm2 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE3-NEXT: bsrl %r10d, %eax -; SSE3-NEXT: xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: bsrl %ecx, %eax -; SSE3-NEXT: xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm3 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE3-NEXT: bsrl %r9d, %eax -; SSE3-NEXT: xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: bsrl %r11d, %eax -; SSE3-NEXT: xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm2 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE3-NEXT: bsrl %r8d, %eax -; SSE3-NEXT: xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm4 -; SSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE3-NEXT: bsrl %eax, %eax -; SSE3-NEXT: 
xorl $7, %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE3-NEXT: popq %rbx -; SSE3-NEXT: retq -; -; SSSE3-LABEL: testv16i8u: -; SSSE3: # BB#0: -; SSSE3-NEXT: pushq %rbx -; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: xorl $7, %eax -; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSSE3-NEXT: bsrl %esi, %esi -; SSSE3-NEXT: xorl $7, %esi -; SSSE3-NEXT: movd %esi, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSSE3-NEXT: bsrl %eax, %eax -; SSSE3-NEXT: xorl $7, %eax -; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx -; SSSE3-NEXT: bsrl %ebx, %ebx -; SSSE3-NEXT: xorl $7, %ebx -; SSSE3-NEXT: movd %ebx, %xmm2 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSSE3-NEXT: bsrl %edx, %edx -; SSSE3-NEXT: xorl $7, %edx -; SSSE3-NEXT: movd %edx, %xmm0 -; SSSE3-NEXT: bsrl %esi, %edx -; SSSE3-NEXT: xorl $7, %edx -; SSSE3-NEXT: movd %edx, %xmm3 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSSE3-NEXT: bsrl %ecx, %ecx -; SSSE3-NEXT: xorl $7, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSSE3-NEXT: bsrl %edx, %edx -; SSSE3-NEXT: xorl $7, %edx -; SSSE3-NEXT: movd %edx, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSSE3-NEXT: bsrl %edi, %edx -; SSSE3-NEXT: xorl $7, %edx 
-; SSSE3-NEXT: movd %edx, %xmm0
-; SSSE3-NEXT: bsrl %eax, %eax
-; SSSE3-NEXT: xorl $7, %eax
-; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT: bsrl %r10d, %eax
-; SSSE3-NEXT: xorl $7, %eax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: bsrl %ecx, %eax
-; SSSE3-NEXT: xorl $7, %eax
-; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSSE3-NEXT: bsrl %r9d, %eax
-; SSSE3-NEXT: xorl $7, %eax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: bsrl %r11d, %eax
-; SSSE3-NEXT: xorl $7, %eax
-; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT: bsrl %r8d, %eax
-; SSSE3-NEXT: xorl $7, %eax
-; SSSE3-NEXT: movd %eax, %xmm4
-; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT: bsrl %eax, %eax
-; SSSE3-NEXT: xorl $7, %eax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSSE3-NEXT: popq %rbx
-; SSSE3-NEXT: retq
-;
 ; SSE41-LABEL: testv16i8u:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pextrb $1, %xmm0, %eax
Index: test/CodeGen/X86/vector-shuffle-variable-128.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
@@ -36,8 +36,8 @@
 ; SSE-NEXT: movslq %edi, %rax
 ; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT: movslq %esi, %rcx
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT: retq
 ;
@@ -48,7 +48,7 @@
 ; AVX-NEXT: movslq %esi, %rcx
 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX-NEXT: retq
 %x0 = extractelement <2 x i64> %x, i32 %i0
 %x1 = extractelement <2 x i64> %x, i32 %i1
@@ -67,10 +67,10 @@
 ; SSE2-NEXT: movslq %ecx, %rcx
 ; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT: retq
 ;
@@ -83,10 +83,10 @@
 ; SSSE3-NEXT: movslq %ecx, %rcx
 ; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT: retq
 ;
@@ -136,10 +136,10 @@
 ; SSE2-NEXT: movslq %ecx, %rcx
 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT: retq
 ;
@@ -152,10 +152,10 @@
 ; SSSE3-NEXT: movslq %ecx, %rcx
 ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT: retq
 ;
@@ -198,100 +198,96 @@
 define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
 ; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
 ; SSE2: # BB#0:
-; SSE2-NEXT: movswq %di, %rax
+; SSE2-NEXT: movswq %di, %r10
 ; SSE2-NEXT: movswq %si, %rsi
-; SSE2-NEXT: movswq %dx, %rdx
-; SSE2-NEXT: movswq %cx, %r10
-; SSE2-NEXT: movswq %r8w, %r11
+; SSE2-NEXT: movswq %dx, %r11
+; SSE2-NEXT: movswq %cx, %rcx
+; SSE2-NEXT: movswq %r8w, %r8
 ; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movswq %r9w, %r8
-; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: movswq %r9w, %rax
+; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rdx
 ; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rdi
-; SSE2-NEXT: movzwl
-24(%rsp,%rcx,2), %ecx ; SSE2-NEXT: movzwl -24(%rsp,%rdi,2), %edi -; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax -; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi -; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %ecx +; SSE2-NEXT: movd %edi, %xmm0 +; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSE2-NEXT: movd %ecx, %xmm1 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %ecx +; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax ; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: movzwl -24(%rsp,%r11,2), %eax +; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %eax ; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-NEXT: movd %edi, %xmm1 -; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-NEXT: movd %esi, %xmm1 +; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzwl -24(%rsp,%r11,2), %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSE2-NEXT: movzwl -24(%rsp,%r8,2), %eax ; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16: ; SSSE3: # BB#0: -; SSSE3-NEXT: movswq %di, %rax +; SSSE3-NEXT: movswq %di, %r10 ; SSSE3-NEXT: movswq %si, %rsi -; SSSE3-NEXT: movswq %dx, %rdx -; SSSE3-NEXT: movswq %cx, %r10 -; SSSE3-NEXT: movswq %r8w, %r11 +; SSSE3-NEXT: movswq %dx, %r11 +; SSSE3-NEXT: movswq %cx, %rcx +; SSSE3-NEXT: movswq %r8w, %r8 ; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movswq %r9w, %r8 -; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rcx +; SSSE3-NEXT: movswq %r9w, %rax +; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rdx ; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rdi -; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi -; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax -; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %ecx +; SSSE3-NEXT: movd %edi, %xmm0 +; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSSE3-NEXT: movd %ecx, %xmm1 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %ecx +; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax ; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: movzwl -24(%rsp,%r11,2), %eax +; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax ; SSSE3-NEXT: movd %eax, %xmm2 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSSE3-NEXT: 
punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSSE3-NEXT: movd %edi, %xmm1 -; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSSE3-NEXT: movd %esi, %xmm1 +; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: movzwl -24(%rsp,%r11,2), %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %eax ; SSSE3-NEXT: movd %eax, %xmm3 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16: ; SSE41: # BB#0: -; SSE41-NEXT: pushq %rbx ; SSE41-NEXT: movswq %di, %rax -; SSE41-NEXT: movswq %si, %rbx -; SSE41-NEXT: movswq %dx, %r11 +; SSE41-NEXT: movswq %si, %rsi +; SSE41-NEXT: movswq %dx, %rdx ; SSE41-NEXT: movswq %cx, %r10 ; SSE41-NEXT: movswq %r8w, %rdi ; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE41-NEXT: movswq %r9w, %rcx -; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %rdx -; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %rsi -; SSE41-NEXT: movzwl -16(%rsp,%rdx,2), %edx -; SSE41-NEXT: movzwl -16(%rsp,%rsi,2), %esi -; SSE41-NEXT: movzwl -16(%rsp,%rax,2), %eax +; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %r8 +; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %r9 +; SSE41-NEXT: movzwl -24(%rsp,%rax,2), %eax ; SSE41-NEXT: movd %eax, %xmm0 -; SSE41-NEXT: pinsrw $1, -16(%rsp,%rbx,2), %xmm0 -; SSE41-NEXT: pinsrw $2, -16(%rsp,%r11,2), %xmm0 -; SSE41-NEXT: pinsrw $3, -16(%rsp,%r10,2), %xmm0 -; SSE41-NEXT: pinsrw $4, -16(%rsp,%rdi,2), %xmm0 -; SSE41-NEXT: pinsrw $5, -16(%rsp,%rcx,2), %xmm0 -; SSE41-NEXT: pinsrw $6, %edx, %xmm0 -; SSE41-NEXT: pinsrw $7, %esi, %xmm0 -; SSE41-NEXT: popq %rbx +; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm0 +; SSE41-NEXT: pinsrw $2, -24(%rsp,%rdx,2), %xmm0 +; SSE41-NEXT: pinsrw $3, -24(%rsp,%r10,2), %xmm0 +; SSE41-NEXT: pinsrw $4, -24(%rsp,%rdi,2), %xmm0 +; SSE41-NEXT: pinsrw $5, -24(%rsp,%rcx,2), %xmm0 +; SSE41-NEXT: pinsrw $6, -24(%rsp,%r8,2), %xmm0 +; SSE41-NEXT: pinsrw $7, -24(%rsp,%r9,2), %xmm0 ; SSE41-NEXT: retq ; ; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16: @@ -307,8 +303,6 @@ ; AVX-NEXT: movswq %r9w, %rax ; AVX-NEXT: movswq {{[0-9]+}}(%rsp), %rsi ; AVX-NEXT: movswq {{[0-9]+}}(%rsp), %rdx -; AVX-NEXT: movzwl -24(%rsp,%rsi,2), %esi -; AVX-NEXT: movzwl -24(%rsp,%rdx,2), %edx ; AVX-NEXT: movzwl -24(%rsp,%r10,2), %ebx ; AVX-NEXT: vmovd %ebx, %xmm0 ; AVX-NEXT: vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0 @@ -316,8 +310,8 @@ ; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0 ; AVX-NEXT: vpinsrw $4, -24(%rsp,%rdi,2), %xmm0, %xmm0 ; AVX-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $6, %esi, %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $7, %edx, %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $6, 
-24(%rsp,%rsi,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $7, -24(%rsp,%rdx,2), %xmm0, %xmm0 ; AVX-NEXT: popq %rbx ; AVX-NEXT: popq %r14 ; AVX-NEXT: retq @@ -347,67 +341,67 @@ ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %r10 ; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %r11 ; SSE2-NEXT: movzbl (%r10,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm15 +; SSE2-NEXT: movd %eax, %xmm8 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm8 +; SSE2-NEXT: movd %eax, %xmm15 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm9 -; SSE2-NEXT: movsbq %dl, %rax +; SSE2-NEXT: movsbq %cl, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm10 -; SSE2-NEXT: movsbq %dil, %rax +; SSE2-NEXT: movsbq %r9b, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movd %eax, %xmm7 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm11 -; SSE2-NEXT: movsbq %r8b, %rax +; SSE2-NEXT: movsbq %sil, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm7 +; SSE2-NEXT: movd %eax, %xmm6 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: movd %eax, %xmm12 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm12 +; SSE2-NEXT: movd %eax, %xmm5 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm13 -; SSE2-NEXT: movsbq %cl, %rax +; SSE2-NEXT: movsbq %dl, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm6 +; SSE2-NEXT: movd %eax, %xmm4 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm14 -; SSE2-NEXT: movsbq %sil, %rax +; SSE2-NEXT: movsbq %r8b, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm5 +; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: movsbq %r9b, %rax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: movsbq %dil, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: movd %eax, %xmm0 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7] ; 
SSE2-NEXT: retq ; ; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8: @@ -416,189 +410,145 @@ ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %r10 ; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %r11 ; SSSE3-NEXT: movzbl (%r10,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm15 +; SSSE3-NEXT: movd %eax, %xmm8 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm8 +; SSSE3-NEXT: movd %eax, %xmm15 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax ; SSSE3-NEXT: movd %eax, %xmm9 -; SSSE3-NEXT: movsbq %dl, %rax +; SSSE3-NEXT: movsbq %cl, %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax ; SSSE3-NEXT: movd %eax, %xmm3 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax ; SSSE3-NEXT: movd %eax, %xmm10 -; SSSE3-NEXT: movsbq %dil, %rax +; SSSE3-NEXT: movsbq %r9b, %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: movd %eax, %xmm7 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax ; SSSE3-NEXT: movd %eax, %xmm11 -; SSSE3-NEXT: movsbq %r8b, %rax +; SSSE3-NEXT: movsbq %sil, %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm7 +; SSSE3-NEXT: movd %eax, %xmm6 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: movd %eax, %xmm12 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm12 +; SSSE3-NEXT: movd %eax, %xmm5 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax ; SSSE3-NEXT: movd %eax, %xmm13 -; SSSE3-NEXT: movsbq %cl, %rax +; SSSE3-NEXT: movsbq %dl, %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm6 +; SSSE3-NEXT: movd %eax, %xmm4 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax ; SSSE3-NEXT: movd %eax, %xmm14 -; SSSE3-NEXT: movsbq %sil, %rax +; SSSE3-NEXT: movsbq %r8b, %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm5 +; SSSE3-NEXT: movd %eax, %xmm1 ; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm4 -; SSSE3-NEXT: movsbq %r9b, %rax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: movsbq %dil, %rax ; SSSE3-NEXT: movzbl (%rax,%r11), %eax -; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: movd %eax, %xmm0 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8: ; SSE41: # BB#0: -; SSE41-NEXT: pushq %rbp -; SSE41-NEXT: pushq %r15 -; SSE41-NEXT: pushq %r14 -; SSE41-NEXT: pushq %r13 -; SSE41-NEXT: pushq %r12 -; SSE41-NEXT: pushq %rbx -; SSE41-NEXT: movsbq %dil, %r15 -; SSE41-NEXT: movsbq %sil, %r14 -; SSE41-NEXT: movsbq %dl, %r11 -; SSE41-NEXT: movsbq %cl, %r10 -; SSE41-NEXT: movsbq %r8b, %r8 +; SSE41-NEXT: movsbq %dil, %rdi ; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE41-NEXT: movsbq %r9b, %r9 -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r12 -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r13 -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rbp -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rbx ; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax -; SSE41-NEXT: movzbl (%r15,%rax), %ecx -; SSE41-NEXT: movd %ecx, %xmm0 -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r15 -; SSE41-NEXT: pinsrb $1, (%r14,%rax), %xmm0 -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r14 -; SSE41-NEXT: pinsrb $2, (%r11,%rax), %xmm0 -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r11 -; SSE41-NEXT: pinsrb $3, (%r10,%rax), %xmm0 -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r10 -; SSE41-NEXT: pinsrb $4, (%r8,%rax), %xmm0 +; SSE41-NEXT: movzbl (%rdi,%rax), %edi +; SSE41-NEXT: movd %edi, %xmm0 +; SSE41-NEXT: movsbq %sil, %rsi +; SSE41-NEXT: pinsrb $1, (%rsi,%rax), %xmm0 +; SSE41-NEXT: movsbq %dl, %rdx +; SSE41-NEXT: pinsrb $2, (%rdx,%rax), %xmm0 +; SSE41-NEXT: movsbq %cl, %rcx +; SSE41-NEXT: pinsrb $3, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq %r8b, %rcx +; SSE41-NEXT: pinsrb $4, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq %r9b, %rcx +; SSE41-NEXT: pinsrb $5, (%rcx,%rax), %xmm0 ; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx -; SSE41-NEXT: pinsrb $5, (%r9,%rax), %xmm0 -; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rdx -; SSE41-NEXT: movzbl (%r12,%rax), %esi -; SSE41-NEXT: movzbl (%r13,%rax), %edi -; SSE41-NEXT: movzbl (%rbp,%rax), %ebp -; SSE41-NEXT: movzbl (%rbx,%rax), %ebx -; SSE41-NEXT: movzbl (%r15,%rax), %r8d -; SSE41-NEXT: movzbl (%r14,%rax), %r9d -; SSE41-NEXT: movzbl (%r11,%rax), %r11d -; SSE41-NEXT: movzbl (%r10,%rax), %r10d -; SSE41-NEXT: movzbl (%rcx,%rax), %ecx -; SSE41-NEXT: movzbl (%rdx,%rax), %eax -; SSE41-NEXT: pinsrb $6, %esi, %xmm0 -; SSE41-NEXT: pinsrb $7, %edi, %xmm0 -; SSE41-NEXT: pinsrb $8, %ebp, %xmm0 -; SSE41-NEXT: pinsrb $9, %ebx, %xmm0 -; SSE41-NEXT: pinsrb $10, %r8d, %xmm0 -; SSE41-NEXT: pinsrb $11, %r9d, %xmm0 -; SSE41-NEXT: pinsrb $12, %r11d, %xmm0 -; SSE41-NEXT: pinsrb $13, %r10d, %xmm0 -; SSE41-NEXT: pinsrb $14, %ecx, %xmm0 -; SSE41-NEXT: pinsrb $15, %eax, %xmm0 -; SSE41-NEXT: popq %rbx -; SSE41-NEXT: popq %r12 -; SSE41-NEXT: popq %r13 -; SSE41-NEXT: popq %r14 -; SSE41-NEXT: popq %r15 -; SSE41-NEXT: popq %rbp +; SSE41-NEXT: pinsrb $6, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $7, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $8, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $9, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $10, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $11, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $12, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $13, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 
{{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $14, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pinsrb $15, (%rcx,%rax), %xmm0 ; SSE41-NEXT: retq ; ; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8: ; AVX: # BB#0: -; AVX-NEXT: pushq %rbp -; AVX-NEXT: pushq %r15 -; AVX-NEXT: pushq %r14 -; AVX-NEXT: pushq %r13 -; AVX-NEXT: pushq %r12 -; AVX-NEXT: pushq %rbx -; AVX-NEXT: movsbq %dil, %r10 -; AVX-NEXT: movsbq %sil, %r11 -; AVX-NEXT: movsbq %dl, %r14 -; AVX-NEXT: movsbq %cl, %r15 -; AVX-NEXT: movsbq %r8b, %r8 +; AVX-NEXT: movsbq %dil, %rax ; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; AVX-NEXT: movsbq %r9b, %r9 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r12 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r13 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rbp -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx ; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rdi -; AVX-NEXT: movzbl (%r10,%rdi), %eax +; AVX-NEXT: movzbl (%rax,%rdi), %eax ; AVX-NEXT: vmovd %eax, %xmm0 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r10 -; AVX-NEXT: vpinsrb $1, (%r11,%rdi), %xmm0, %xmm0 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r11 -; AVX-NEXT: vpinsrb $2, (%r14,%rdi), %xmm0, %xmm0 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r14 -; AVX-NEXT: vpinsrb $3, (%r15,%rdi), %xmm0, %xmm0 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r15 -; AVX-NEXT: vpinsrb $4, (%r8,%rdi), %xmm0, %xmm0 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r8 -; AVX-NEXT: vpinsrb $5, (%r9,%rdi), %xmm0, %xmm0 -; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rsi -; AVX-NEXT: movzbl (%r12,%rdi), %edx -; AVX-NEXT: movzbl (%r13,%rdi), %ebx -; AVX-NEXT: movzbl (%rbp,%rdi), %ebp -; AVX-NEXT: movzbl (%rcx,%rdi), %ecx -; AVX-NEXT: movzbl (%r10,%rdi), %eax -; AVX-NEXT: movzbl (%r11,%rdi), %r9d -; AVX-NEXT: movzbl (%r14,%rdi), %r10d -; AVX-NEXT: movzbl (%r15,%rdi), %r11d -; AVX-NEXT: movzbl (%r8,%rdi), %r8d -; AVX-NEXT: movzbl (%rsi,%rdi), %esi -; AVX-NEXT: vpinsrb $6, %edx, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $7, %ebx, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $11, %r9d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $12, %r10d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $13, %r11d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $14, %r8d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $15, %esi, %xmm0, %xmm0 -; AVX-NEXT: popq %rbx -; AVX-NEXT: popq %r12 -; AVX-NEXT: popq %r13 -; AVX-NEXT: popq %r14 -; AVX-NEXT: popq %r15 -; AVX-NEXT: popq %rbp +; AVX-NEXT: movsbq %sil, %rax +; AVX-NEXT: vpinsrb $1, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq %dl, %rax +; AVX-NEXT: vpinsrb $2, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq %cl, %rax +; AVX-NEXT: vpinsrb $3, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq %r8b, %rax +; AVX-NEXT: vpinsrb $4, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq %r9b, %rax +; AVX-NEXT: vpinsrb $5, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $6, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $7, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $8, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $9, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $10, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $11, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $12, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq 
{{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $13, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $14, (%rax,%rdi), %xmm0, %xmm0 +; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax +; AVX-NEXT: vpinsrb $15, (%rax,%rdi), %xmm0, %xmm0 ; AVX-NEXT: retq %x0 = extractelement <16 x i8> %x, i8 %i0 %x1 = extractelement <16 x i8> %x, i8 %i1 @@ -649,11 +599,11 @@ ; SSE2-NEXT: movslq 12(%rdi), %rsi ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero -; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32: @@ -665,11 +615,11 @@ ; SSSE3-NEXT: movslq 12(%rdi), %rsi ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero -; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32: @@ -719,270 +669,218 @@ define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind { ; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8: ; SSE2: # BB#0: -; SSE2-NEXT: movsbq (%rdi), %rcx +; SSE2-NEXT: movsbq (%rdi), %rax ; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %rax -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: movsbq 8(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm8 -; SSE2-NEXT: movsbq 12(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm9 -; SSE2-NEXT: movsbq 4(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm3 -; SSE2-NEXT: movsbq 14(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm10 -; SSE2-NEXT: movsbq 6(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm5 -; SSE2-NEXT: movsbq 10(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm11 -; SSE2-NEXT: movsbq 2(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm7 -; SSE2-NEXT: movsbq 15(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm12 -; SSE2-NEXT: movsbq 7(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm2 -; SSE2-NEXT: movsbq 11(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm13 -; SSE2-NEXT: movsbq 3(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm6 -; SSE2-NEXT: movsbq 13(%rdi), %rcx -; SSE2-NEXT: movzbl 
(%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm14 -; SSE2-NEXT: movsbq 5(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm4 -; SSE2-NEXT: movsbq 9(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %ecx -; SSE2-NEXT: movd %ecx, %xmm15 -; SSE2-NEXT: movsbq 1(%rdi), %rcx -; SSE2-NEXT: movzbl (%rcx,%rax), %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7] +; SSE2-NEXT: movsbq 15(%rdi), %rdx +; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm8 +; SSE2-NEXT: movsbq 7(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm15 +; SSE2-NEXT: movsbq 11(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm9 +; SSE2-NEXT: movsbq 3(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm3 +; SSE2-NEXT: movsbq 13(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm10 +; SSE2-NEXT: movsbq 5(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm7 +; SSE2-NEXT: movsbq 9(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm11 +; SSE2-NEXT: movsbq 1(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm6 +; SSE2-NEXT: movsbq 14(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm12 +; SSE2-NEXT: movsbq 6(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm5 +; SSE2-NEXT: movsbq 10(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm13 +; SSE2-NEXT: movsbq 2(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm4 +; SSE2-NEXT: movsbq 12(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm14 +; SSE2-NEXT: movsbq 4(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm1 +; SSE2-NEXT: movsbq 8(%rdi), %rdx +; SSE2-NEXT: movzbl (%rdx,%rcx), %edx +; SSE2-NEXT: movd %edx, %xmm2 +; SSE2-NEXT: movzbl (%rax,%rcx), %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8: ; SSSE3: # BB#0: -; SSSE3-NEXT: movsbq (%rdi), %rcx +; SSSE3-NEXT: movsbq 
(%rdi), %rax ; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rax -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: movsbq 8(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm8 -; SSSE3-NEXT: movsbq 12(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm9 -; SSSE3-NEXT: movsbq 4(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm3 -; SSSE3-NEXT: movsbq 14(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm10 -; SSSE3-NEXT: movsbq 6(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm5 -; SSSE3-NEXT: movsbq 10(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm11 -; SSSE3-NEXT: movsbq 2(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm7 -; SSSE3-NEXT: movsbq 15(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm12 -; SSSE3-NEXT: movsbq 7(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: movsbq 11(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm13 -; SSSE3-NEXT: movsbq 3(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm6 -; SSSE3-NEXT: movsbq 13(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm14 -; SSSE3-NEXT: movsbq 5(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm4 -; SSSE3-NEXT: movsbq 9(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx -; SSSE3-NEXT: movd %ecx, %xmm15 -; SSSE3-NEXT: movsbq 1(%rdi), %rcx -; SSSE3-NEXT: movzbl (%rcx,%rax), %eax -; SSSE3-NEXT: movd %eax, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7] +; SSSE3-NEXT: movsbq 15(%rdi), %rdx +; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm8 +; SSSE3-NEXT: movsbq 7(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm15 +; SSSE3-NEXT: movsbq 11(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm9 +; SSSE3-NEXT: movsbq 3(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm3 +; SSSE3-NEXT: movsbq 13(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm10 +; SSSE3-NEXT: movsbq 5(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm7 +; SSSE3-NEXT: movsbq 9(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm11 +; SSSE3-NEXT: movsbq 1(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm6 +; SSSE3-NEXT: movsbq 14(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm12 +; SSSE3-NEXT: movsbq 6(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm5 +; SSSE3-NEXT: movsbq 10(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm13 +; SSSE3-NEXT: movsbq 2(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm4 +; SSSE3-NEXT: movsbq 12(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm14 +; SSSE3-NEXT: movsbq 4(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm1 +; 
SSSE3-NEXT: movsbq 8(%rdi), %rdx +; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx +; SSSE3-NEXT: movd %edx, %xmm2 +; SSSE3-NEXT: movzbl (%rax,%rcx), %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = 
xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8: ; SSE41: # BB#0: -; SSE41-NEXT: pushq %rbp -; SSE41-NEXT: pushq %r15 -; SSE41-NEXT: pushq %r14 -; SSE41-NEXT: pushq %r13 -; SSE41-NEXT: pushq %r12 -; SSE41-NEXT: pushq %rbx -; SSE41-NEXT: movsbq (%rdi), %rax +; SSE41-NEXT: movsbq (%rdi), %rcx ; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE41-NEXT: movsbq 1(%rdi), %r15 -; SSE41-NEXT: movsbq 2(%rdi), %r8 -; SSE41-NEXT: movsbq 3(%rdi), %r9 -; SSE41-NEXT: movsbq 4(%rdi), %r10 -; SSE41-NEXT: movsbq 5(%rdi), %r11 -; SSE41-NEXT: movsbq 6(%rdi), %r14 -; SSE41-NEXT: movsbq 7(%rdi), %r12 -; SSE41-NEXT: movsbq 8(%rdi), %r13 -; SSE41-NEXT: movsbq 9(%rdi), %rdx +; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax +; SSE41-NEXT: movzbl (%rcx,%rax), %ecx +; SSE41-NEXT: movd %ecx, %xmm0 +; SSE41-NEXT: movsbq 1(%rdi), %rcx +; SSE41-NEXT: pinsrb $1, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 2(%rdi), %rcx +; SSE41-NEXT: pinsrb $2, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 3(%rdi), %rcx +; SSE41-NEXT: pinsrb $3, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 4(%rdi), %rcx +; SSE41-NEXT: pinsrb $4, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 5(%rdi), %rcx +; SSE41-NEXT: pinsrb $5, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 6(%rdi), %rcx +; SSE41-NEXT: pinsrb $6, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 7(%rdi), %rcx +; SSE41-NEXT: pinsrb $7, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 8(%rdi), %rcx +; SSE41-NEXT: pinsrb $8, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 9(%rdi), %rcx +; SSE41-NEXT: pinsrb $9, (%rcx,%rax), %xmm0 ; SSE41-NEXT: movsbq 10(%rdi), %rcx -; SSE41-NEXT: movsbq 11(%rdi), %rsi -; SSE41-NEXT: movsbq 12(%rdi), %rbx -; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp -; SSE41-NEXT: movzbl (%rax,%rbp), %eax -; SSE41-NEXT: movd %eax, %xmm0 -; SSE41-NEXT: movsbq 13(%rdi), %rax -; SSE41-NEXT: pinsrb $1, (%r15,%rbp), %xmm0 -; SSE41-NEXT: movsbq 14(%rdi), %r15 -; SSE41-NEXT: movsbq 15(%rdi), %rdi -; SSE41-NEXT: movzbl (%rdi,%rbp), %edi -; SSE41-NEXT: movzbl (%r15,%rbp), %r15d -; SSE41-NEXT: movzbl (%rax,%rbp), %eax -; SSE41-NEXT: movzbl (%rbx,%rbp), %ebx -; SSE41-NEXT: movzbl (%rsi,%rbp), %esi -; SSE41-NEXT: movzbl (%rcx,%rbp), %ecx -; SSE41-NEXT: movzbl (%rdx,%rbp), %edx -; SSE41-NEXT: 
movzbl (%r13,%rbp), %r13d -; SSE41-NEXT: movzbl (%r12,%rbp), %r12d -; SSE41-NEXT: movzbl (%r14,%rbp), %r14d -; SSE41-NEXT: movzbl (%r11,%rbp), %r11d -; SSE41-NEXT: movzbl (%r10,%rbp), %r10d -; SSE41-NEXT: movzbl (%r9,%rbp), %r9d -; SSE41-NEXT: movzbl (%r8,%rbp), %ebp -; SSE41-NEXT: pinsrb $2, %ebp, %xmm0 -; SSE41-NEXT: pinsrb $3, %r9d, %xmm0 -; SSE41-NEXT: pinsrb $4, %r10d, %xmm0 -; SSE41-NEXT: pinsrb $5, %r11d, %xmm0 -; SSE41-NEXT: pinsrb $6, %r14d, %xmm0 -; SSE41-NEXT: pinsrb $7, %r12d, %xmm0 -; SSE41-NEXT: pinsrb $8, %r13d, %xmm0 -; SSE41-NEXT: pinsrb $9, %edx, %xmm0 -; SSE41-NEXT: pinsrb $10, %ecx, %xmm0 -; SSE41-NEXT: pinsrb $11, %esi, %xmm0 -; SSE41-NEXT: pinsrb $12, %ebx, %xmm0 -; SSE41-NEXT: pinsrb $13, %eax, %xmm0 -; SSE41-NEXT: pinsrb $14, %r15d, %xmm0 -; SSE41-NEXT: pinsrb $15, %edi, %xmm0 -; SSE41-NEXT: popq %rbx -; SSE41-NEXT: popq %r12 -; SSE41-NEXT: popq %r13 -; SSE41-NEXT: popq %r14 -; SSE41-NEXT: popq %r15 -; SSE41-NEXT: popq %rbp +; SSE41-NEXT: pinsrb $10, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 11(%rdi), %rcx +; SSE41-NEXT: pinsrb $11, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 12(%rdi), %rcx +; SSE41-NEXT: pinsrb $12, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 13(%rdi), %rcx +; SSE41-NEXT: pinsrb $13, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 14(%rdi), %rcx +; SSE41-NEXT: pinsrb $14, (%rcx,%rax), %xmm0 +; SSE41-NEXT: movsbq 15(%rdi), %rcx +; SSE41-NEXT: pinsrb $15, (%rcx,%rax), %xmm0 ; SSE41-NEXT: retq ; ; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8: ; AVX: # BB#0: -; AVX-NEXT: pushq %rbp -; AVX-NEXT: pushq %r15 -; AVX-NEXT: pushq %r14 -; AVX-NEXT: pushq %r13 -; AVX-NEXT: pushq %r12 -; AVX-NEXT: pushq %rbx -; AVX-NEXT: movsbq (%rdi), %rsi +; AVX-NEXT: movsbq (%rdi), %rax ; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; AVX-NEXT: movsbq 1(%rdi), %r15 -; AVX-NEXT: movsbq 2(%rdi), %r8 -; AVX-NEXT: movsbq 3(%rdi), %r9 -; AVX-NEXT: movsbq 4(%rdi), %r10 -; AVX-NEXT: movsbq 5(%rdi), %r11 -; AVX-NEXT: movsbq 6(%rdi), %r14 -; AVX-NEXT: movsbq 7(%rdi), %r12 -; AVX-NEXT: movsbq 8(%rdi), %r13 -; AVX-NEXT: movsbq 9(%rdi), %rdx +; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx +; AVX-NEXT: movzbl (%rax,%rcx), %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: movsbq 1(%rdi), %rax +; AVX-NEXT: vpinsrb $1, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 2(%rdi), %rax +; AVX-NEXT: vpinsrb $2, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 3(%rdi), %rax +; AVX-NEXT: vpinsrb $3, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 4(%rdi), %rax +; AVX-NEXT: vpinsrb $4, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 5(%rdi), %rax +; AVX-NEXT: vpinsrb $5, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 6(%rdi), %rax +; AVX-NEXT: vpinsrb $6, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 7(%rdi), %rax +; AVX-NEXT: vpinsrb $7, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 8(%rdi), %rax +; AVX-NEXT: vpinsrb $8, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 9(%rdi), %rax +; AVX-NEXT: vpinsrb $9, (%rax,%rcx), %xmm0, %xmm0 ; AVX-NEXT: movsbq 10(%rdi), %rax -; AVX-NEXT: movsbq 11(%rdi), %rcx -; AVX-NEXT: movsbq 12(%rdi), %rbx -; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp -; AVX-NEXT: movzbl (%rsi,%rbp), %esi -; AVX-NEXT: vmovd %esi, %xmm0 -; AVX-NEXT: movsbq 13(%rdi), %rsi -; AVX-NEXT: vpinsrb $1, (%r15,%rbp), %xmm0, %xmm0 -; AVX-NEXT: movsbq 14(%rdi), %r15 -; AVX-NEXT: movsbq 15(%rdi), %rdi -; AVX-NEXT: movzbl (%rdi,%rbp), %edi -; AVX-NEXT: movzbl (%r15,%rbp), %r15d -; AVX-NEXT: movzbl (%rsi,%rbp), %esi -; AVX-NEXT: movzbl (%rbx,%rbp), %ebx -; AVX-NEXT: movzbl (%rcx,%rbp), %ecx -; AVX-NEXT: movzbl (%rax,%rbp), %eax -; 
AVX-NEXT: movzbl (%rdx,%rbp), %edx -; AVX-NEXT: movzbl (%r13,%rbp), %r13d -; AVX-NEXT: movzbl (%r12,%rbp), %r12d -; AVX-NEXT: movzbl (%r14,%rbp), %r14d -; AVX-NEXT: movzbl (%r11,%rbp), %r11d -; AVX-NEXT: movzbl (%r10,%rbp), %r10d -; AVX-NEXT: movzbl (%r9,%rbp), %r9d -; AVX-NEXT: movzbl (%r8,%rbp), %ebp -; AVX-NEXT: vpinsrb $2, %ebp, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $3, %r9d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $4, %r10d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $5, %r11d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $6, %r14d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $7, %r12d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $8, %r13d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $9, %edx, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $12, %ebx, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $13, %esi, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $14, %r15d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrb $15, %edi, %xmm0, %xmm0 -; AVX-NEXT: popq %rbx -; AVX-NEXT: popq %r12 -; AVX-NEXT: popq %r13 -; AVX-NEXT: popq %r14 -; AVX-NEXT: popq %r15 -; AVX-NEXT: popq %rbp +; AVX-NEXT: vpinsrb $10, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 11(%rdi), %rax +; AVX-NEXT: vpinsrb $11, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 12(%rdi), %rax +; AVX-NEXT: vpinsrb $12, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 13(%rdi), %rax +; AVX-NEXT: vpinsrb $13, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 14(%rdi), %rax +; AVX-NEXT: vpinsrb $14, (%rax,%rcx), %xmm0, %xmm0 +; AVX-NEXT: movsbq 15(%rdi), %rax +; AVX-NEXT: vpinsrb $15, (%rax,%rcx), %xmm0, %xmm0 ; AVX-NEXT: retq %p0 = getelementptr inbounds i8, i8* %i, i64 0 %p1 = getelementptr inbounds i8, i8* %i, i64 1 @@ -1065,8 +963,8 @@ ; SSE-NEXT: movslq %ecx, %rcx ; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: retq ; @@ -1079,9 +977,9 @@ ; AVX-NEXT: movslq %ecx, %rcx ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX-NEXT: retq %x0 = extractelement <4 x float> %x, i32 %i0 %x1 = extractelement <4 x float> %x, i32 %i1 @@ -1105,27 +1003,27 @@ ; SSE2-NEXT: movswq %r8w, %rdi ; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movswq %r9w, %rax -; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi ; SSE2-NEXT: xorl %edx, %edx ; SSE2-NEXT: movd %edx, %xmm0 ; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSE2-NEXT: movd %ecx, %xmm1 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-NEXT: movd %esi, %xmm2 ; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %eax ; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-NEXT: movzwl -40(%rsp,%r10,2), %eax -; SSE2-NEXT: movzwl -40(%rsp,%r11,2), %ecx -; SSE2-NEXT: movd %ecx, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] +; SSE2-NEXT: movzwl -40(%rsp,%r11,2), %eax +; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-NEXT: movd %eax, %xmm0 ; SSE2-NEXT: movzwl -40(%rsp,%rdi,2), %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: movzwl -40(%rsp,%r10,2), %eax +; SSE2-NEXT: movd %eax, %xmm0 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16: @@ -1138,27 +1036,27 @@ ; SSSE3-NEXT: movswq %r8w, %rdi ; SSSE3-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ; SSSE3-NEXT: movswq %r9w, %rax -; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi ; SSSE3-NEXT: xorl %edx, %edx ; SSSE3-NEXT: movd %edx, %xmm0 ; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSSE3-NEXT: movd %ecx, %xmm1 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSSE3-NEXT: movd %esi, %xmm2 ; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax ; SSSE3-NEXT: movd %eax, %xmm3 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSSE3-NEXT: movzwl -40(%rsp,%r10,2), %eax -; SSSE3-NEXT: movzwl -40(%rsp,%r11,2), %ecx -; SSSE3-NEXT: movd %ecx, %xmm1 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] +; SSSE3-NEXT: movzwl -40(%rsp,%r11,2), %eax +; SSSE3-NEXT: movd %eax, %xmm1 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSSE3-NEXT: movd %eax, %xmm0 ; SSSE3-NEXT: movzwl -40(%rsp,%rdi,2), %eax -; SSSE3-NEXT: movd %eax, %xmm3 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSSE3-NEXT: movd %eax, %xmm2 +; SSSE3-NEXT: movzwl -40(%rsp,%r10,2), %eax +; SSSE3-NEXT: movd %eax, %xmm0 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16: Index: 
test/CodeGen/X86/vector-shuffle-variable-256.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
@@ -18,7 +18,7 @@
 ; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; ALL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT: movq %rbp, %rsp
 ; ALL-NEXT: popq %rbp
 ; ALL-NEXT: retq
@@ -66,7 +66,7 @@
 ; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; ALL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT: retq
 %x0 = extractelement <2 x double> %x, i64 %i0
 %x1 = extractelement <2 x double> %x, i64 %i1
@@ -89,11 +89,11 @@
 ; AVX1-NEXT: vmovaps %ymm0, (%rsp)
 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: movq %rbp, %rsp
 ; AVX1-NEXT: popq %rbp
 ; AVX1-NEXT: retq
@@ -107,11 +107,11 @@
 ; AVX2-NEXT: vmovaps %ymm0, (%rsp)
 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT: movq %rbp, %rsp
 ; AVX2-NEXT: popq %rbp
 ; AVX2-NEXT: retq
@@ -136,7 +136,7 @@
 ; AVX1-NEXT: vmovaps %ymm0, (%rsp)
 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: movq %rbp, %rsp
@@ -152,7 +152,7 @@
 ; AVX2-NEXT: vmovaps %ymm0, (%rsp)
 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT: movq %rbp, %rsp
@@ -175,11 +175,11 @@
 ; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = 
mem[0],zero ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64: @@ -187,11 +187,11 @@ ; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: retq %x0 = extractelement <2 x i64> %x, i64 %i0 %x1 = extractelement <2 x i64> %x, i64 %i1 @@ -209,29 +209,29 @@ ; AVX1: # BB#0: ; AVX1-NEXT: pushq %rbp ; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: pushq %rbx ; AVX1-NEXT: andq $-32, %rsp ; AVX1-NEXT: subq $64, %rsp ; AVX1-NEXT: movslq %edi, %rax -; AVX1-NEXT: movslq %esi, %rsi -; AVX1-NEXT: movslq %edx, %rdx -; AVX1-NEXT: movslq %ecx, %r11 -; AVX1-NEXT: movslq %r8d, %r10 +; AVX1-NEXT: movslq %esi, %rbx +; AVX1-NEXT: movslq %edx, %r11 +; AVX1-NEXT: movslq %ecx, %r10 +; AVX1-NEXT: movslq %r8d, %rdi ; AVX1-NEXT: vmovaps %ymm0, (%rsp) -; AVX1-NEXT: movslq %r9d, %r8 -; AVX1-NEXT: movslq 16(%rbp), %rdi -; AVX1-NEXT: movslq 24(%rbp), %rcx +; AVX1-NEXT: movslq %r9d, %rcx +; AVX1-NEXT: movslq 16(%rbp), %rdx +; AVX1-NEXT: movslq 24(%rbp), %rsi ; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] ; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0] -; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero -; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0] -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 -; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: leaq -8(%rbp), %rsp +; AVX1-NEXT: popq %rbx ; AVX1-NEXT: popq %rbp ; AVX1-NEXT: retq ; @@ -283,26 +283,26 @@ define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind { ; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32: ; ALL: # BB#0: +; ALL-NEXT: pushq %rbx ; ALL-NEXT: movslq %edi, %rax -; ALL-NEXT: movslq %esi, %rsi -; ALL-NEXT: movslq %edx, %rdx -; ALL-NEXT: movslq 
%ecx, %r11 -; ALL-NEXT: movslq %r8d, %r10 +; ALL-NEXT: movslq %esi, %rbx +; ALL-NEXT: movslq %edx, %r11 +; ALL-NEXT: movslq %ecx, %r10 +; ALL-NEXT: movslq %r8d, %rdi ; ALL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; ALL-NEXT: movslq %r9d, %r8 -; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rdi -; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rcx +; ALL-NEXT: movslq %r9d, %rcx +; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rdx +; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rsi ; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3] +; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3] +; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] ; ALL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; ALL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3] -; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3] -; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0] -; ALL-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero -; ALL-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3] -; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3] -; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0] -; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3] +; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3] +; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0] +; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; ALL-NEXT: popq %rbx ; ALL-NEXT: retq %x0 = extractelement <4 x float> %x, i32 %i0 %x1 = extractelement <4 x float> %x, i32 %i1 @@ -335,26 +335,19 @@ ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax ; AVX1-NEXT: vmovd %eax, %xmm0 ; AVX1-NEXT: movslq 40(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq 48(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq 56(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq 64(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq 72(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq 80(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq 88(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq %edi, %rax ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax ; AVX1-NEXT: vmovd %eax, %xmm1 @@ -369,11 +362,9 @@ ; AVX1-NEXT: movslq %r9d, %rax ; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1 ; AVX1-NEXT: movslq 16(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm1, %xmm1 ; AVX1-NEXT: movslq 24(%rbp), %rax -; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, 
%xmm0, %ymm1, %ymm0 ; AVX1-NEXT: movq %rbp, %rsp ; AVX1-NEXT: popq %rbp @@ -390,26 +381,19 @@ ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax ; AVX2-NEXT: vmovd %eax, %xmm0 ; AVX2-NEXT: movslq 40(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX2-NEXT: movslq 48(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX2-NEXT: movslq 56(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX2-NEXT: movslq 64(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX2-NEXT: movslq 72(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX2-NEXT: movslq 80(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX2-NEXT: movslq 88(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 +; AVX2-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0 ; AVX2-NEXT: movslq %edi, %rax ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax ; AVX2-NEXT: vmovd %eax, %xmm1 @@ -424,11 +408,9 @@ ; AVX2-NEXT: movslq %r9d, %rax ; AVX2-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1 ; AVX2-NEXT: movslq 16(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm1, %xmm1 ; AVX2-NEXT: movslq 24(%rbp), %rax -; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax -; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1 +; AVX2-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm1, %xmm1 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: movq %rbp, %rsp ; AVX2-NEXT: popq %rbp @@ -476,26 +458,19 @@ ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax ; AVX1-NEXT: vmovd %eax, %xmm0 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax -; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0 ; AVX1-NEXT: movslq %edi, %rax ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax ; AVX1-NEXT: 
vmovd %eax, %xmm1
@@ -510,11 +485,9 @@
 ; AVX1-NEXT: movslq %r9d, %rax
 ; AVX1-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
 ;
@@ -525,26 +498,19 @@
 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
 ; AVX2-NEXT: vmovd %eax, %xmm0
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT: movslq %edi, %rax
 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
 ; AVX2-NEXT: vmovd %eax, %xmm1
@@ -559,11 +525,9 @@
 ; AVX2-NEXT: movslq %r9d, %rax
 ; AVX2-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT: retq
 %x0 = extractelement <8 x i16> %x, i32 %i0
@@ -619,11 +583,11 @@
 ; AVX1-NEXT: vmovaps %ymm0, (%rsp)
 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: movq %rbp, %rsp
 ; AVX1-NEXT: popq %rbp
 ; AVX1-NEXT: retq
@@ -641,11 +605,11 @@
 ; AVX2-NEXT: vmovaps %ymm0, (%rsp)
 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT: movq %rbp, %rsp
 ; AVX2-NEXT: popq %rbp
 ; AVX2-NEXT: retq
@@ -678,11 +642,11 @@
 ; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
@@ -694,11 +658,11 @@
 ; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT: retq
 %p0 = getelementptr inbounds i64, i64* %i, i32 0
 %p1 = getelementptr inbounds i64, i64* %i, i32 1
Index: test/CodeGen/XCore/varargs.ll
===================================================================
--- test/CodeGen/XCore/varargs.ll
+++ test/CodeGen/XCore/varargs.ll
@@ -26,10 +26,10 @@
 ; CHECK-LABEL: test_vararg
 ; CHECK: extsp 6
 ; CHECK: stw lr, sp[1]
+; CHECK: stw r3, sp[6]
 ; CHECK: stw r0, sp[3]
 ; CHECK: stw r1, sp[4]
 ; CHECK: stw r2, sp[5]
-; CHECK: stw r3, sp[6]
 ; CHECK: ldaw r0, sp[3]
 ; CHECK: stw r0, sp[2]
 %list = alloca i8*, align 4
Index: test/DebugInfo/X86/dbg-value-dag-combine.ll
===================================================================
--- test/DebugInfo/X86/dbg-value-dag-combine.ll
+++ test/DebugInfo/X86/dbg-value-dag-combine.ll
@@ -13,7 +13,7 @@
 %1 = extractelement <4 x i32> %0, i32 0
 call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !9, metadata !DIExpression()), !dbg !11
 call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !13, metadata !DIExpression()), !dbg !14
- %tmp2 = load i32, i32 addrspace(1)* %ip, align 4, !dbg !15
+ %tmp2 = load volatile i32, i32 addrspace(1)* %ip, align 4, !dbg !15
 %tmp3 = add i32 0, %tmp2, !dbg !15
 ; CHECK: ##DEBUG_VALUE: idx <- %E{{..$}}
 call void @llvm.dbg.value(metadata i32 %tmp3, i64 0, metadata !13, metadata !DIExpression()), !dbg !15