Index: include/llvm/Analysis/LoopAccessAnalysis.h
===================================================================
--- include/llvm/Analysis/LoopAccessAnalysis.h
+++ include/llvm/Analysis/LoopAccessAnalysis.h
@@ -667,6 +667,21 @@
                           const ValueToValueMap &StridesMap = ValueToValueMap(),
                           bool Assume = false, bool ShouldCheckWrap = true);
 
+/// \brief Attempt to sort the 'loads' in \p VL and return the sorted values in
+/// \p Sorted.
+///
+/// Returns 'false' if sorting is not legal or feasible, otherwise returns
+/// 'true'. If \p Mask is not null, it also returns in \p Mask the
+/// shuffle mask for the actual memory access order.
+///
+/// For example, for a given VL of memory accesses in program order, a[i+2],
+/// a[i+0], a[i+1] and a[i+3], this function will sort the VL and save the
+/// sorted values in 'Sorted' as a[i+0], a[i+1], a[i+2], a[i+3] and saves the
+/// mask for the actual memory accesses in program order in 'Mask' as <2,0,1,3>.
+bool sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
+                      ScalarEvolution &SE, SmallVectorImpl<Value *> &Sorted,
+                      SmallVectorImpl<unsigned> *Mask = nullptr);
+
 /// \brief Returns true if the memory operations \p A and \p B are consecutive.
 /// This is a simple API that does not depend on the analysis pass.
 bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
Index: lib/Analysis/LoopAccessAnalysis.cpp
===================================================================
--- lib/Analysis/LoopAccessAnalysis.cpp
+++ lib/Analysis/LoopAccessAnalysis.cpp
@@ -1107,6 +1107,76 @@
   return -1;
 }
 
+// TODO: This API can be improved by using the permutation of the given width
+// as the accesses are entered into the map.
+bool llvm::sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
+                            ScalarEvolution &SE,
+                            SmallVectorImpl<Value *> &Sorted,
+                            SmallVectorImpl<unsigned> *Mask) {
+  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
+  OffValPairs.reserve(VL.size());
+  Sorted.reserve(VL.size());
+
+  // Walk over the pointers, and map each of them to an offset relative to
+  // the first pointer in the array.
+  Value *Ptr0 = getPointerOperand(VL[0]);
+  const SCEV *Scev0 = SE.getSCEV(Ptr0);
+  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
+  PointerType *PtrTy = dyn_cast<PointerType>(Ptr0->getType());
+  uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
+
+  for (auto *Val : VL) {
+    // The only kind of access we care about here is load.
+    if (!isa<LoadInst>(Val))
+      return false;
+
+    Value *Ptr = getPointerOperand(Val);
+    assert(Ptr && "Expected value to have a pointer operand.");
+    // If a pointer refers to a different underlying object, bail - the
+    // pointers are by definition incomparable.
+    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
+    if (CurrObj != Obj0)
+      return false;
+
+    const SCEVConstant *Diff =
+        dyn_cast<SCEVConstant>(SE.getMinusSCEV(SE.getSCEV(Ptr), Scev0));
+    // The pointers may not have a constant offset from each other, or SCEV
+    // may just not be smart enough to figure out that they do. Regardless,
+    // there's nothing we can do.
+    if (!Diff || Diff->getAPInt().abs().getSExtValue() > (VL.size() - 1) * Size)
+      return false;
+
+    OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
+  }
+  SmallVector<unsigned, 4> UseOrder(VL.size());
+  for (unsigned i = 0; i < VL.size(); i++) {
+    UseOrder[i] = i;
+  }
+
+  // Sort the memory accesses and keep the order of their uses in UseOrder.
+  std::sort(UseOrder.begin(), UseOrder.end(),
+            [&OffValPairs](unsigned Left, unsigned Right) {
+              return OffValPairs[Left].first < OffValPairs[Right].first;
+            });
+
+  for (unsigned i = 0; i < VL.size(); i++)
+    Sorted.emplace_back(OffValPairs[UseOrder[i]].second);
+
+  // Sort UseOrder to compute the Mask.
+  if (Mask) {
+    Mask->reserve(VL.size());
+    for (unsigned i = 0; i < VL.size(); i++)
+      Mask->emplace_back(i);
+    std::sort(Mask->begin(), Mask->end(),
+              [&UseOrder](unsigned Left, unsigned Right) {
+                return UseOrder[Left] < UseOrder[Right];
+              });
+  }
+
+  return true;
+}
+
+
 /// Returns true if the memory operations \p A and \p B are consecutive.
 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                                ScalarEvolution &SE, bool CheckType) {
Index: lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -684,17 +684,23 @@
   int getEntryCost(TreeEntry *E);
 
   /// This is the recursive part of buildTree.
-  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int);
+  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int UserIndx = -1,
+                     int OpdNum = 0);
 
   /// \returns True if the ExtractElement/ExtractValue instructions in VL can
   /// be vectorized to use the original vector (or aggregate "bitcast" to a vector).
   bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const;
 
-  /// Vectorize a single entry in the tree.
-  Value *vectorizeTree(TreeEntry *E);
+  /// Vectorize a single entry in the tree. \p OpdNum indicates the ordinal
+  /// position of the operand corresponding to this tree entry \p E in the user
+  /// tree entry indicated by \p UserIndx.
+  // In other words, "E == TreeEntry[UserIndx].getOperand(OpdNum)".
+  Value *vectorizeTree(TreeEntry *E, int OpdNum = 0, int UserIndx = -1);
 
-  /// Vectorize a single entry in the tree, starting in \p VL.
-  Value *vectorizeTree(ArrayRef<Value *> VL);
+  /// Vectorize a single entry in the tree, starting in \p VL. \p OpdNum
+  /// indicates the ordinal position of the operand corresponding to this \p VL
+  /// of scalar values in the user, indicated by \p UserIndx, that \p VL feeds.
+  Value *vectorizeTree(ArrayRef<Value *> VL, int OpdNum = 0, int UserIndx = -1);
 
   /// \returns the pointer to the vectorized value if \p VL is already
   /// vectorized, or NULL. They may happen in cycles.
@@ -732,7 +738,7 @@
                                              SmallVectorImpl<Value *> &Left,
                                              SmallVectorImpl<Value *> &Right);
   struct TreeEntry {
-    TreeEntry(std::vector<TreeEntry> &Container) : Container(Container) {}
+    TreeEntry(std::vector<TreeEntry> &Container) : ShuffleMask(), Container(Container) {}
 
     /// \returns true if the scalars in VL are equal to this entry.
     bool isSame(ArrayRef<Value *> VL) const {
@@ -740,6 +746,16 @@
       return std::equal(VL.begin(), VL.end(), Scalars.begin());
     }
 
+    /// \returns true if the scalars in VL are found in this tree entry.
+    bool isFoundJumbled(ArrayRef<Value *> VL, const DataLayout &DL,
+                        ScalarEvolution &SE) const {
+      assert(VL.size() == Scalars.size() && "Invalid size");
+      SmallVector<Value *, 4> List;
+      if (!sortLoadAccesses(VL, DL, SE, List))
+        return false;
+      return std::equal(List.begin(), List.end(), Scalars.begin());
+    }
+
     /// A vector of scalars.
     ValueList Scalars;
 
@@ -749,6 +765,14 @@
     /// Do we need to gather this sequence ?
    bool NeedToGather = false;
 
+    /// Records the optional shuffle mask for the uses of jumbled memory accesses.
+    /// For example, a non-empty ShuffleMask[1] represents the permutation of
+    /// lanes that operand #1 of this vectorized instruction should undergo
+    /// before feeding this vectorized instruction, whereas an empty
+    /// ShuffleMask[0] indicates that the lanes of operand #0 of this
+    /// vectorized instruction need not be permuted at all.
+    SmallVector<unsigned, 4> ShuffleMask[3];
+
     /// Points back to the VectorizableTree.
     ///
     /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
@@ -767,14 +791,25 @@
 
   /// Create a new VectorizableTree entry.
   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
-                          int &UserTreeIdx, const InstructionsState &S) {
+                          int &UserTreeIdx, const InstructionsState &S,
+                          ArrayRef<unsigned> ShuffleMask = None,
+                          int OpdNum = 0) {
     assert((!Vectorized || S.Opcode != 0) &&
            "Vectorized TreeEntry without opcode");
     VectorizableTree.emplace_back(VectorizableTree);
+
     int idx = VectorizableTree.size() - 1;
     TreeEntry *Last = &VectorizableTree[idx];
     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
     Last->NeedToGather = !Vectorized;
+
+    if (!ShuffleMask.empty()) {
+      TreeEntry *UserEntry = &VectorizableTree[UserTreeIdx];
+      assert(UserEntry->ShuffleMask[OpdNum].empty() && "Mask already present!");
+      UserEntry->ShuffleMask[OpdNum].insert(
+          UserEntry->ShuffleMask[OpdNum].begin(), ShuffleMask.begin(),
+          ShuffleMask.end());
+    }
     if (Vectorized) {
       Last->State = S;
       unsigned AltOpcode = getAltOpcode(S.Opcode);
@@ -1492,7 +1527,7 @@
 }
 
 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
-                            int UserTreeIdx) {
+                            int UserTreeIdx, int OpdNum) {
   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
 
   InstructionsState S = getSameOpcode(VL);
@@ -1646,7 +1681,7 @@
           Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
               PH->getIncomingBlock(i)));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
     }
@@ -1692,8 +1727,6 @@
       }
 
      // Check if the loads are consecutive, reversed, or neither.
-      // TODO: What we really want is to sort the loads, but for now, check
-      // the two likely directions.
      bool Consecutive = true;
      bool ReverseConsecutive = true;
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
@@ -1721,15 +1754,41 @@
          break;
        }
 
-      BS.cancelScheduling(VL, VL0);
-      newTreeEntry(VL, false, UserTreeIdx, S);
-
       if (ReverseConsecutive) {
-        ++NumLoadsWantToChangeOrder;
         DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
-      } else {
-        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
+        ++NumLoadsWantToChangeOrder;
+        BS.cancelScheduling(VL, VL0);
+        newTreeEntry(VL, false, UserTreeIdx, S);
+        return;
+      }
+
+      if (VL.size() > 2) {
+        bool ShuffledLoads = true;
+        SmallVector<Value *, 4> Sorted;
+        SmallVector<unsigned, 4> Mask;
+        if (sortLoadAccesses(VL, *DL, *SE, Sorted, &Mask)) {
+          auto NewVL = makeArrayRef(Sorted.begin(), Sorted.end());
+          for (unsigned i = 0, e = NewVL.size() - 1; i < e; ++i) {
+            if (!isConsecutiveAccess(NewVL[i], NewVL[i + 1], *DL, *SE)) {
+              ShuffledLoads = false;
+              break;
+            }
+          }
+          // TODO: Tracking how many loads want an arbitrary shuffled order
+          // would be useful.
+          if (ShuffledLoads) {
+            DEBUG(dbgs() << "SLP: added a vector of loads which needs "
+                            "permutation of loaded lanes.\n");
+            newTreeEntry(NewVL, true, UserTreeIdx, S,
+                         makeArrayRef(Mask.begin(), Mask.end()), OpdNum);
+            return;
+          }
+        }
       }
+
+      DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
+      BS.cancelScheduling(VL, VL0);
+      newTreeEntry(VL, false, UserTreeIdx, S);
       return;
     }
     case Instruction::ZExt:
@@ -1763,7 +1822,7 @@
         for (Value *j : VL)
           Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
     }
@@ -1792,7 +1851,7 @@
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
      }
      return;
    }
@@ -1824,7 +1883,7 @@
        ValueList Left, Right;
        reorderInputsAccordingToOpcode(S.Opcode, VL, Left, Right);
        buildTree_rec(Left, Depth + 1, UserTreeIdx);
-        buildTree_rec(Right, Depth + 1, UserTreeIdx);
+        buildTree_rec(Right, Depth + 1, UserTreeIdx, 1);
        return;
      }
 
@@ -1845,7 +1904,7 @@
          Operands.push_back(Operand);
        }
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
      }
      return;
 
@@ -1893,7 +1952,7 @@
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
      }
      return;
    }
@@ -1978,7 +2037,7 @@
          CallInst *CI2 = dyn_cast<CallInst>(j);
          Operands.push_back(CI2->getArgOperand(i));
        }
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
      }
      return;
    }
@@ -1999,7 +2058,7 @@
      ValueList Left, Right;
      reorderAltShuffleOperands(S.Opcode, VL, Left, Right);
      buildTree_rec(Left, Depth + 1, UserTreeIdx);
-      buildTree_rec(Right, Depth + 1, UserTreeIdx);
+      buildTree_rec(Right, Depth + 1, UserTreeIdx, 1);
      return;
    }
 
@@ -2020,7 +2079,7 @@
        Operands.push_back(Operand);
      }
 
-      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+      buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
    }
    return;
 
@@ -2871,12 +2930,15 @@
   return nullptr;
 }
 
-Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
+Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, int OpdNum, int UserIndx) {
   InstructionsState S = getSameOpcode(VL);
   if (S.Opcode) {
     if (TreeEntry *E = getTreeEntry(S.OpValue)) {
-      if (E->isSame(VL))
-        return vectorizeTree(E);
+      TreeEntry *UserTreeEntry =
+          UserIndx != -1 ? &VectorizableTree[UserIndx] : nullptr;
+      if (E->isSame(VL) ||
+          (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty() &&
+           E->isFoundJumbled(VL, *DL, *SE)))
+        return vectorizeTree(E, OpdNum, UserIndx);
     }
   }
 
@@ -2888,9 +2950,11 @@
   return Gather(VL, VecTy);
 }
 
-Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
+Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
   IRBuilder<>::InsertPointGuard Guard(Builder);
 
+  int CurrIndx = ScalarToTreeEntry[E->Scalars[0]];
+  TreeEntry *UserTreeEntry = nullptr;
   if (E->VectorizedValue) {
     DEBUG(dbgs() << "SLP: Diamond merged for " << *E->State.OpValue << ".\n");
     return E->VectorizedValue;
@@ -2938,7 +3002,7 @@
       Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
 
-      Value *Vec = vectorizeTree(Operands);
+      Value *Vec = vectorizeTree(Operands, i, CurrIndx);
      NewPhi->addIncoming(Vec, IBB);
    }
 
@@ -2991,7 +3055,7 @@
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *InVec = vectorizeTree(INVL);
+      Value *InVec = vectorizeTree(INVL, 0, CurrIndx);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
@@ -3012,8 +3076,8 @@
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *L = vectorizeTree(LHSV);
-      Value *R = vectorizeTree(RHSV);
+      Value *L = vectorizeTree(LHSV, 0, CurrIndx);
+      Value *R = vectorizeTree(RHSV, 1, CurrIndx);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
 
@@ -3040,9 +3104,9 @@
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *Cond = vectorizeTree(CondVec);
-      Value *True = vectorizeTree(TrueVec);
-      Value *False = vectorizeTree(FalseVec);
+      Value *Cond = vectorizeTree(CondVec, 0, CurrIndx);
+      Value *True = vectorizeTree(TrueVec, 1, CurrIndx);
+      Value *False = vectorizeTree(FalseVec, 2, CurrIndx);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
 
@@ -3089,8 +3153,8 @@
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *LHS = vectorizeTree(LHSVL);
-      Value *RHS = vectorizeTree(RHSVL);
+      Value *LHS = vectorizeTree(LHSVL, 0, CurrIndx);
+      Value *RHS = vectorizeTree(RHSVL, 1, CurrIndx);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
 
@@ -3111,7 +3175,17 @@
      // sink them all the way down past store instructions.
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      LoadInst *LI = cast<LoadInst>(VL0);
+      if (UserIndx != -1) {
+        UserTreeEntry = &VectorizableTree[UserIndx];
+      }
+
+      LoadInst *LI = nullptr;
+      if (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty()) {
+        LI = cast<LoadInst>(E->Scalars[0]);
+      } else {
+        LI = cast<LoadInst>(VL0);
+      }
+
      Type *ScalarLoadTy = LI->getType();
      unsigned AS = LI->getPointerAddressSpace();
 
@@ -3133,7 +3207,24 @@
      LI->setAlignment(Alignment);
      E->VectorizedValue = LI;
      ++NumVectorInstructions;
-      return propagateMetadata(LI, E->Scalars);
+      propagateMetadata(LI, E->Scalars);
+
+      if (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty()) {
+        SmallVector<Constant *, 4> Mask;
+        for (unsigned Lane = 0, LE = UserTreeEntry->ShuffleMask[OpdNum].size();
+             Lane != LE; ++Lane) {
+          Mask.push_back(
+              Builder.getInt32(UserTreeEntry->ShuffleMask[OpdNum][Lane]));
+        }
+        // Generate a shuffle for the jumbled memory access.
+        Value *Undef = UndefValue::get(VecTy);
+        Value *Shuf = Builder.CreateShuffleVector((Value *)LI, Undef,
+                                                  ConstantVector::get(Mask));
+        E->VectorizedValue = Shuf;
+        ++NumVectorInstructions;
+        return Shuf;
+      }
+      return LI;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(VL0);
@@ -3146,7 +3237,7 @@
 
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *VecValue = vectorizeTree(ValueOp);
+      Value *VecValue = vectorizeTree(ValueOp, 0, CurrIndx);
      Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                            VecTy->getPointerTo(AS));
      StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
@@ -3173,7 +3264,7 @@
      for (Value *V : E->Scalars)
        Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));
 
-      Value *Op0 = vectorizeTree(Op0VL);
+      Value *Op0 = vectorizeTree(Op0VL, 0, CurrIndx);
 
      std::vector<Value *> OpVecs;
      for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
           ++j) {
        ValueList OpVL;
        for (Value *V : E->Scalars)
          OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));
 
-        Value *OpVec = vectorizeTree(OpVL);
+        Value *OpVec = vectorizeTree(OpVL, j, CurrIndx);
        OpVecs.push_back(OpVec);
      }
 
@@ -3221,7 +3312,7 @@
          OpVL.push_back(CEI->getArgOperand(j));
        }
 
-        Value *OpVec = vectorizeTree(OpVL);
+        Value *OpVec = vectorizeTree(OpVL, j, CurrIndx);
        DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
        OpVecs.push_back(OpVec);
      }
 
@@ -3252,8 +3343,8 @@
      reorderAltShuffleOperands(E->State.Opcode, E->Scalars, LHSVL, RHSVL);
      setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *LHS = vectorizeTree(LHSVL);
-      Value *RHS = vectorizeTree(RHSVL);
+      Value *LHS = vectorizeTree(LHSVL, 0, CurrIndx);
+      Value *RHS = vectorizeTree(RHSVL, 1, CurrIndx);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
 
@@ -3360,7 +3451,13 @@
    assert(E && "Invalid scalar");
    assert(!E->NeedToGather && "Extracting from a gather list");
 
-    Value *Vec = E->VectorizedValue;
+    Value *Vec = nullptr;
+    if ((Vec = dyn_cast<ShuffleVectorInst>(E->VectorizedValue)) &&
+        dyn_cast<LoadInst>(cast<Instruction>(Vec)->getOperand(0))) {
+      Vec = cast<Instruction>(E->VectorizedValue)->getOperand(0);
+    } else {
+      Vec = E->VectorizedValue;
+    }
    assert(Vec && "Can't find vectorizable value");
 
    Value *Lane = Builder.getInt32(ExternalUse.Lane);
Index: test/Transforms/SLPVectorizer/X86/jumbled-load-multiuse.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/jumbled-load-multiuse.ll
+++ test/Transforms/SLPVectorizer/X86/jumbled-load-multiuse.ll
@@ -11,20 +11,16 @@
 define i32 @fn1() {
 ; CHECK-LABEL: @fn1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 0), align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 1), align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 2), align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 3), align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[TMP2]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP3]], i32 2
-; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP0]], i32 3
-; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt <4 x i32> [[TMP7]], zeroinitializer
-; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <4 x i32> [[TMP4]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 1
-; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 2
-; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 8, i32 3
-; CHECK-NEXT:    [[TMP12:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP11]], <4 x i32>
-; CHECK-NEXT:    store <4 x i32> [[TMP12]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt <4 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 2
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 8, i32 3
+; CHECK-NEXT:    [[TMP8:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[TMP7]], <4 x i32>
+; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
Index: test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll
===================================================================
--- /dev/null
+++ test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s
+
+
+;void jumble (int * restrict A, int * restrict B) {
+; int tmp0 = A[10]*A[0];
+; int tmp1 = A[11]*A[1];
+; int tmp2 = A[12]*A[3];
+; int tmp3 = A[13]*A[2];
+; B[0] = tmp0;
+; B[1] = tmp1;
+; B[2] = tmp2;
+; B[3] = tmp3;
+;}
+; Function Attrs: norecurse nounwind uwtable
+define void @jumble(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) {
+; CHECK-LABEL: @jumble(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 10
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 11
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 12
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 13
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[ARRAYIDX]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[A]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> undef, <4 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = mul nsw <4 x i32> [[TMP4]], [[TMP1]]
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[B]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
+  %0 = load i32, i32* %arrayidx, align 4
+  %1 = load i32, i32* %A, align 4
+  %mul = mul nsw i32 %1, %0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 11
+  %2 = load i32, i32* %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
+  %3 = load i32, i32* %arrayidx3, align 4
+  %mul4 = mul nsw i32 %3, %2
+  %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 12
+  %4 = load i32, i32* %arrayidx5, align 4
+  %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 3
+  %5 = load i32, i32* %arrayidx6, align 4
+  %mul7 = mul nsw i32 %5, %4
+  %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 13
+  %6 = load i32, i32* %arrayidx8, align 4
+  %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 2
+  %7 = load i32, i32* %arrayidx9, align 4
+  %mul10 = mul nsw i32 %7, %6
+  store i32 %mul, i32* %B, align 4
+  %arrayidx12 = getelementptr inbounds i32, i32* %B, i64 1
+  store i32 %mul4, i32* %arrayidx12, align 4
+  %arrayidx13 = getelementptr inbounds i32, i32* %B, i64 2
+  store i32 %mul7, i32* %arrayidx13, align 4
+  %arrayidx14 = getelementptr inbounds i32, i32* %B, i64 3
+  store i32 %mul10, i32* %arrayidx14, align 4
+  ret void
+}
+
Index: test/Transforms/SLPVectorizer/X86/jumbled-load.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/jumbled-load.ll
+++ test/Transforms/SLPVectorizer/X86/jumbled-load.ll
@@ -5,34 +5,27 @@
 define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
 ; CHECK-LABEL: @jumbled-load(
-; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* %in, i64 0
-; CHECK-NEXT:    [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
+; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
 ; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
-; CHECK-NEXT:    [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
 ; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1
-; CHECK-NEXT:    [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
 ; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
-; CHECK-NEXT:    [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
-; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* %inn, i64 0
-; CHECK-NEXT:    [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32>
+; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
 ; CHECK-NEXT:    [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
-; CHECK-NEXT:    [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4
 ; CHECK-NEXT:    [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
-; CHECK-NEXT:    [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4
 ; CHECK-NEXT:    [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
-; CHECK-NEXT:    [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4
-; CHECK-NEXT:    [[MUL_1:%.*]] = mul i32 [[LOAD_3]], [[LOAD_5]]
-; CHECK-NEXT:    [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_8]]
-; CHECK-NEXT:    [[MUL_3:%.*]] = mul i32 [[LOAD_4]], [[LOAD_7]]
-; CHECK-NEXT:    [[MUL_4:%.*]] = mul i32 [[LOAD_1]], [[LOAD_6]]
-; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* %out, i64 0
-; CHECK-NEXT:    store i32 [[MUL_1]], i32* [[GEP_7]], align 4
-; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i32, i32* %out, i64 1
-; CHECK-NEXT:    store i32 [[MUL_2]], i32* [[GEP_8]], align 4
-; CHECK-NEXT:    [[GEP_9:%.*]] = getelementptr inbounds i32, i32* %out, i64 2
-; CHECK-NEXT:    store i32 [[MUL_3]], i32* [[GEP_9]], align 4
-; CHECK-NEXT:    [[GEP_10:%.*]] = getelementptr inbounds i32, i32* %out, i64 3
-; CHECK-NEXT:    store i32 [[MUL_4]], i32* [[GEP_10]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <4 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = mul <4 x i32> [[TMP3]], [[TMP6]]
+; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
+; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
+; CHECK-NEXT:    [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
+; CHECK-NEXT:    [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
   %in.addr = getelementptr inbounds i32, i32* %in, i64 0
Index: test/Transforms/SLPVectorizer/X86/store-jumbled.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/store-jumbled.ll
+++ test/Transforms/SLPVectorizer/X86/store-jumbled.ll
@@ -6,33 +6,26 @@
 define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
 ; CHECK-LABEL: @jumbled-load(
 ; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
-; CHECK-NEXT:    [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
 ; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1
-; CHECK-NEXT:    [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
 ; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
-; CHECK-NEXT:    [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
 ; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
-; CHECK-NEXT:    [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32>
 ; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
-; CHECK-NEXT:    [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4
 ; CHECK-NEXT:    [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
-; CHECK-NEXT:    [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4
 ; CHECK-NEXT:    [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
-; CHECK-NEXT:    [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4
 ; CHECK-NEXT:    [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
-; CHECK-NEXT:    [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4
-; CHECK-NEXT:    [[MUL_1:%.*]] = mul i32 [[LOAD_1]], [[LOAD_5]]
-; CHECK-NEXT:    [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_6]]
-; CHECK-NEXT:    [[MUL_3:%.*]] = mul i32 [[LOAD_3]], [[LOAD_7]]
-; CHECK-NEXT:    [[MUL_4:%.*]] = mul i32 [[LOAD_4]], [[LOAD_8]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <4 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = mul <4 x i32> [[TMP3]], [[TMP6]]
 ; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
 ; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
 ; CHECK-NEXT:    [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
 ; CHECK-NEXT:    [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
-; CHECK-NEXT:    store i32 [[MUL_1]], i32* [[GEP_9]], align 4
-; CHECK-NEXT:    store i32 [[MUL_2]], i32* [[GEP_7]], align 4
-; CHECK-NEXT:    store i32 [[MUL_3]], i32* [[GEP_10]], align 4
-; CHECK-NEXT:    store i32 [[MUL_4]], i32* [[GEP_8]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
   %in.addr = getelementptr inbounds i32, i32* %in, i64 0
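For readers tracing the sorting logic in the LoopAccessAnalysis change above, here is a minimal standalone sketch (plain C++, no LLVM types; the variable names and the main() driver are illustrative only and not part of the patch) of the index-sort and inverse-permutation step that sortLoadAccesses performs, using the offsets from its doc-comment example a[i+2], a[i+0], a[i+1], a[i+3]:

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // Element offsets of each access in program order (the doc-comment example).
  std::vector<long> Offsets = {2, 0, 1, 3};

  // UseOrder[j] = program-order index of the j-th access in memory order.
  std::vector<unsigned> UseOrder(Offsets.size());
  std::iota(UseOrder.begin(), UseOrder.end(), 0u);
  std::sort(UseOrder.begin(), UseOrder.end(),
            [&](unsigned L, unsigned R) { return Offsets[L] < Offsets[R]; });
  // UseOrder is now {1, 2, 0, 3}: sorted order a[i+0], a[i+1], a[i+2], a[i+3].

  // Mask is the inverse permutation of UseOrder: Mask[i] is the lane of the
  // sorted (memory-order) vector that feeds program-order lane i. The patch
  // computes it the same way: sort the identity permutation by UseOrder.
  std::vector<unsigned> Mask(UseOrder.size());
  std::iota(Mask.begin(), Mask.end(), 0u);
  std::sort(Mask.begin(), Mask.end(),
            [&](unsigned L, unsigned R) { return UseOrder[L] < UseOrder[R]; });

  for (unsigned M : Mask)
    std::printf("%u ", M); // prints "2 0 1 3", matching the <2,0,1,3> example
  std::printf("\n");
  return 0;
}

A shufflevector of the sorted wide load with this mask reproduces the original program-order lanes, which is what the Load case of vectorizeTree emits for jumbled accesses.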