Index: llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp =================================================================== --- llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -124,6 +124,11 @@ cl::desc( "Attempt to vectorize horizontal reductions feeding into a store")); +static cl::opt<int> + SLPThrottleBudget("slp-throttling-budget", cl::init(32), cl::Hidden, + cl::desc("Limit the total number of nodes for cost " + "recalculations during throttling")); + static cl::opt MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, cl::desc("Attempt to vectorize for this register size in bits")); @@ -589,11 +594,61 @@ /// \returns the cost incurred by unwanted spills and fills, caused by /// holding live values over call sites. - InstructionCost getSpillCost() const; + InstructionCost getSpillCost(); + + /// \returns the cost of extracting vectorized elements. + InstructionCost + getExtractCost(SmallPtrSetImpl<TreeEntry *> &ProposedToGather) const; + + /// \returns the cost of gathering canceled elements to be used + /// by vectorized operations during throttling. + InstructionCost getInsertCost(SmallPtrSetImpl<TreeEntry *> &ProposedToGather); + + struct TECostComparator { + bool operator()(const TreeEntry *LHS, const TreeEntry *RHS) const { + return LHS->Cost > RHS->Cost; + } + }; + using TEVectorizableSet = std::set<TreeEntry *, TECostComparator>; + + /// Find a subtree of the whole tree suitable to be vectorized. When + /// vectorizing the whole tree is not profitable, we can consider vectorizing + /// part of that tree. The SLP algorithm looks for operations to vectorize + /// starting from seed instructions at the bottom and follows chains of + /// dependencies to the top of the SLP graph, grouping potentially + /// vectorizable scalar operations into bundles. + /// For example: + /// + /// vector form + /// | + /// vector form vector form + /// \ / + /// vector form + /// + /// The total cost is not profitable to vectorize, hence all operations stay + /// in scalar form. + /// + /// Here is the same tree after SLP throttling transformation: + /// + /// vector form + /// | + /// vector form gathered nodes + /// \ / + /// vector form + /// + /// So, we can throttle some operations in such a way that it is still + /// profitable to vectorize part of the tree, while vectorizing the whole + /// tree does not make sense. + /// More details: https://www.cl.cam.ac.uk/~tmj32/papers/docs/porpodas15-pact.pdf + bool findSubTree(TEVectorizableSet &Vec, InstructionCost UserCost); + + /// Get the raw cost of all elements of the tree. + InstructionCost getRawTreeCost(); /// \returns the vectorization cost of the subtree that starts at \p VL. /// A negative number means that this is profitable. - InstructionCost getTreeCost(); + InstructionCost getTreeCost(bool TreeReduce = false, + InstructionCost UserCost = 0); /// Construct a vectorizable tree that starts at \p Roots, ignoring users for /// the purpose of scheduling and extraction in the \p UserIgnoreLst. @@ -614,6 +669,8 @@ ScalarToTreeEntry.clear(); MustGather.clear(); ExternalUses.clear(); + InternalTreeUses.clear(); + RemovedOperations.clear(); NumOpsWantToKeepOrder.clear(); NumOpsWantToKeepOriginalOrder = 0; for (auto &Iter : BlocksSchedules) { @@ -621,6 +678,11 @@ BS->clear(); } MinBWs.clear(); + NoCallInst = true; + RawTreeCost = 0; + TreeCost = 0; + PartialCost = 0; + IsCostSumReady = false; } unsigned getTreeSize() const { return VectorizableTree.size(); } @@ -783,6 +845,9 @@ /// may not be necessary.
bool isLoadCombineCandidate() const; + /// Cut the tree to make it partially vectorizable. + void cutTree(SmallPtrSetImpl<TreeEntry *> &ProposedToGather); + OptimizationRemarkEmitter *getORE() { return ORE; } /// This structure holds any data we need about the edges being traversed @@ -1585,6 +1650,9 @@ /// Does this entry require reordering? SmallVector ReorderIndices; + /// Cost of this tree entry. + InstructionCost Cost = 0; + /// Points back to the VectorizableTree. /// /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has @@ -1597,6 +1665,9 @@ /// have multiple users so the data structure is not truly a tree. SmallVector UserTreeIndices; + /// Tree entries used by this entry. + TinyPtrVector<TreeEntry *> UseEntries; + /// The index of this treeEntry in VectorizableTree. int Idx = -1; @@ -1829,8 +1900,10 @@ MustGather.insert(VL.begin(), VL.end()); } - if (UserTreeIdx.UserTE) + if (UserTreeIdx.UserTE) { Last->UserTreeIndices.push_back(UserTreeIdx); + VectorizableTree[UserTreeIdx.UserTE->Idx]->UseEntries.push_back(Last); + } return Last; } @@ -1880,6 +1953,9 @@ }; using UserList = SmallVector; + /// \returns the cost of extracting the vectorized elements. + InstructionCost getExtractOperationCost(const ExternalUser &EU) const; + /// Checks if two instructions may access the same memory. /// /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it @@ -1930,6 +2006,31 @@ /// after vectorization. UserList ExternalUses; + /// Tree entries that should not be vectorized due to throttling. + SmallVector<TreeEntry *> RemovedOperations; + + /// Raw cost of all elements in the tree. + InstructionCost RawTreeCost = 0; + + /// Final cost of the tree. + InstructionCost TreeCost = 0; + + /// Partial cost of the tree. + InstructionCost PartialCost = 0; + + /// Indicates that no CallInst was found in the tree, so we don't need to + /// calculate the spill cost. + bool NoCallInst = true; + + /// True if we have already calculated the cost for the tree. + bool IsCostSumReady = false; + + /// Width of the operations to vectorize (number of lanes in the root bundle). + unsigned BundleWidth = 0; + + /// Uses of in-tree values by internal tree operations proposed to be vectorized. + SmallDenseMap<Value *, UserList> InternalTreeUses; + /// Values used only by @llvm.assume calls. SmallPtrSet EphValues; @@ -2334,6 +2435,9 @@ /// performed in a basic block. void scheduleBlock(BlockScheduling *BS); + /// Remove operations from the list of operations proposed to be scheduled. + void removeFromScheduling(BlockScheduling *BS); + /// List of users to ignore during scheduling and that don't need extracting. ArrayRef UserIgnoreList; @@ -2537,7 +2641,7 @@ buildTree_rec(Roots, 0, EdgeInfo()); // Collect the values that we need to extract from the tree. - for (auto &TEPtr : VectorizableTree) { + for (std::unique_ptr<TreeEntry> &TEPtr : VectorizableTree) { TreeEntry *Entry = TEPtr.get(); // No need to handle users of gathered values. @@ -2574,6 +2678,7 @@ // Some in-tree scalars will remain as scalar in vectorized // instructions. If that is the case, the one in Lane 0 will // be used.
+ InternalTreeUses[U].emplace_back(Scalar, U, FoundLane); if (UseScalar != U || UseEntry->State == TreeEntry::ScatterVectorize || !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { @@ -3308,6 +3413,50 @@ } } +void BoUpSLP::cutTree(SmallPtrSetImpl<TreeEntry *> &ProposedToGather) { + SmallVector<TreeEntry *> VecNodes; + + for (std::unique_ptr<TreeEntry> &TEPtr : VectorizableTree) { + TreeEntry *Entry = TEPtr.get(); + if (Entry->State != TreeEntry::Vectorize && + Entry->State != TreeEntry::ScatterVectorize) + continue; + // For all canceled operations we should consider the possibility that they + // use values from non-canceled (vectorized) operations, and for that we + // have to populate the ExternalUses list with those canceled users. + for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { + Value *Scalar = Entry->Scalars[Lane]; + for (User *U : Scalar->users()) { + LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); + TreeEntry *UserTE = getTreeEntry(U); + if (!UserTE || ProposedToGather.count(UserTE) == 0) + continue; + // Ignore users in the user ignore list. + auto *UserInst = dyn_cast<Instruction>(U); + if (!UserInst) + continue; + + if (is_contained(UserIgnoreList, UserInst)) + continue; + LLVM_DEBUG(dbgs() << "SLP: Need extract to canceled operation:" << *U + << " from lane " << Lane << " from " << *Scalar + << ".\n"); + ExternalUses.emplace_back(Scalar, U, Lane); + } + } + } + // Canceling unprofitable elements. + for (TreeEntry *Entry : ProposedToGather) { + for (Value *V : Entry->Scalars) { + ScalarToTreeEntry.erase(V); +#ifndef NDEBUG + LLVM_DEBUG(dbgs() << "SLP: Remove scalar " << *V + << " out of proposed to vectorize.\n"); +#endif + } + } +} + unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { unsigned N = 1; Type *EltTy = T; @@ -3967,12 +4116,11 @@ return true; } -InstructionCost BoUpSLP::getSpillCost() const { +InstructionCost BoUpSLP::getSpillCost() { // Walk from the bottom of the tree to the top, tracking which values are // live. When we see a call instruction that is not part of our tree, // query TTI to see if there is a cost to keeping values live over it // (for example, if spills and fills are required). - unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); InstructionCost Cost = 0; SmallPtrSet LiveValues; @@ -4037,6 +4185,7 @@ } if (NumCalls) { + NoCallInst = false; SmallVector V; for (auto *II : LiveValues) V.push_back(FixedVectorType::get(II->getType(), BundleWidth)); @@ -4049,15 +4198,110 @@ return Cost; } -InstructionCost BoUpSLP::getTreeCost() { - InstructionCost Cost = 0; +InstructionCost BoUpSLP::getExtractOperationCost(const ExternalUser &EU) const { + // Uses by ephemeral values are free (because the ephemeral value will be + // removed prior to code generation, and so the extraction will be + // removed as well). + if (EphValues.count(EU.User)) + return 0; + + // If we plan to rewrite the tree in a smaller type, we will need to sign + // extend the extracted value back to the original type. Here, we account + // for the extract and the added cost of the sign extend if needed. + auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); + Value *ScalarRoot = VectorizableTree.front()->Scalars[0]; + + auto It = MinBWs.find(ScalarRoot); + if (It != MinBWs.end()) { + uint64_t Width = It->second.first; + bool Signed = It->second.second; + auto *MinTy = IntegerType::get(F->getContext(), Width); + unsigned ExtOp = Signed ?
Instruction::SExt : Instruction::ZExt; + VecTy = FixedVectorType::get(MinTy, BundleWidth); + return (TTI->getExtractWithExtendCost(ExtOp, EU.Scalar->getType(), VecTy, + EU.Lane)); + } + return TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); +} + +InstructionCost +BoUpSLP::getExtractCost(SmallPtrSetImpl<TreeEntry *> &ProposedToGather) const { + InstructionCost ExtractCost = 0; + SmallPtrSet ExtractCostCalculated; + // Consider the possibility of extracting vectorized + // values for use by canceled elements. + for (TreeEntry *Entry : ProposedToGather) { + for (Value *V : Entry->Scalars) { + // Consider the possibility of extracting vectorized + // values for use by canceled elements. + auto It = InternalTreeUses.find(V); + if (It != InternalTreeUses.end()) { + const UserList &UL = It->second; + for (const ExternalUser &IU : UL) + ExtractCost += getExtractOperationCost(IU); + } + } + } + for (const ExternalUser &EU : ExternalUses) { + // We only add extract cost once for the same scalar. + if (!ExtractCostCalculated.insert(EU.Scalar).second) + continue; + + ExtractCost += getExtractOperationCost(EU); + } + return ExtractCost; +} + +InstructionCost +BoUpSLP::getInsertCost(SmallPtrSetImpl<TreeEntry *> &ProposedToGather) { + InstructionCost InsertCost = 0; + for (TreeEntry *Entry : ProposedToGather) { + // Avoid already-vectorized TreeEntries; they are already in vector form and + // we don't need to gather those operations. + if (ProposedToGather.count(Entry) == 0) + continue; + for (Value *V : Entry->Scalars) { + auto *Inst = cast<Instruction>(V); + if (llvm::any_of(Inst->users(), [this](User *Op) { + return ScalarToTreeEntry.count(Op) > 0; + })) { + InsertCost += getEntryCost(Entry); + break; + } + } + } + return InsertCost; +} + +bool BoUpSLP::findSubTree(TEVectorizableSet &Vec, InstructionCost UserCost) { + for (const std::unique_ptr<TreeEntry> &TEPtr : VectorizableTree) { + TreeEntry *Entry = TEPtr.get(); + // Ignore any non-vectorizable entries, entries with low cost, + // or simple entries with just one operand or less. + if ((Entry->State != TreeEntry::Vectorize && + Entry->State != TreeEntry::ScatterVectorize) || + Entry->Cost <= 0 || !Entry->Idx) + continue; + Vec.insert(Entry); + } + InstructionCost Sum = 0; + for (TreeEntry *Entry : Vec) + Sum += Entry->Cost; + // Avoid reducing the tree if there is no potential room to reduce. + if ((TreeCost - UserCost - Sum) >= -SLPCostThreshold) + return false; + + return (Vec.size() > 0); +} + +InstructionCost BoUpSLP::getRawTreeCost() { + InstructionCost CostSum = 0; + BundleWidth = VectorizableTree.front()->Scalars.size(); LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " << VectorizableTree.size() << ".\n"); - unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); - - for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { - TreeEntry &TE = *VectorizableTree[I].get(); + for (std::unique_ptr<TreeEntry> &TEPtr : VectorizableTree) { + TreeEntry &TE = *TEPtr.get(); // We create duplicate tree entries for gather sequences that have multiple // uses. However, we should not compute the cost of duplicate sequences. @@ -4072,69 +4316,107 @@ // existing heuristics based on tree size may yield different results.
// if (TE.State == TreeEntry::NeedToGather && - std::any_of(std::next(VectorizableTree.begin(), I + 1), - VectorizableTree.end(), - [TE](const std::unique_ptr &EntryPtr) { - return EntryPtr->State == TreeEntry::NeedToGather && - EntryPtr->isSame(TE.Scalars); - })) + llvm::any_of(llvm::drop_begin(VectorizableTree, TE.Idx + 1), + [TE](const std::unique_ptr &EntryPtr) { + return EntryPtr->State == TreeEntry::NeedToGather && + EntryPtr->isSame(TE.Scalars); + })) continue; - InstructionCost C = getEntryCost(&TE); - Cost += C; - LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C + TE.Cost = getEntryCost(&TE); + LLVM_DEBUG(dbgs() << "SLP: Adding cost " << TE.Cost << " for bundle that starts with " << *TE.Scalars[0] - << ".\n" - << "SLP: Current total cost = " << Cost << "\n"); + << ".\n"); + CostSum += TE.Cost; + LLVM_DEBUG(dbgs() << "SLP: Current total cost = " << CostSum << "\n"); } - SmallPtrSet ExtractCostCalculated; - InstructionCost ExtractCost = 0; - for (ExternalUser &EU : ExternalUses) { - // We only add extract cost once for the same scalar. - if (!ExtractCostCalculated.insert(EU.Scalar).second) - continue; - - // Uses by ephemeral values are free (because the ephemeral value will be - // removed prior to code generation, and so the extraction will be - // removed as well). - if (EphValues.count(EU.User)) - continue; + if (SLPThrottleBudget > 0) + for (std::unique_ptr &TEPtr : VectorizableTree) { + TreeEntry *TE = TEPtr.get(); + if (TE->State != TreeEntry::Vectorize && + TE->State != TreeEntry::ScatterVectorize) + continue; + InstructionCost GatherCost = 0; + for (TreeEntry *Gather : TE->UseEntries) + if (Gather->State != TreeEntry::Vectorize && + Gather->State != TreeEntry::ScatterVectorize) + GatherCost += Gather->Cost; + TE->Cost += GatherCost; + } + return CostSum; +} - // If we plan to rewrite the tree in a smaller type, we will need to sign - // extend the extracted value back to the original type. Here, we account - // for the extract and the added cost of the sign extend if needed. - auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); - auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; - if (MinBWs.count(ScalarRoot)) { - auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); - auto Extend = - MinBWs[ScalarRoot].second ? 
Instruction::SExt : Instruction::ZExt; - VecTy = FixedVectorType::get(MinTy, BundleWidth); - ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), - VecTy, EU.Lane); - } else { - ExtractCost += - TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); - } +InstructionCost BoUpSLP::getTreeCost(bool TreeReduce, + InstructionCost UserCost) { + InstructionCost CostSum; + SmallPtrSet ProposedToGather; + if (!IsCostSumReady) { + CostSum = getRawTreeCost(); + RawTreeCost = CostSum; + } else { + CostSum = RawTreeCost; } - InstructionCost SpillCost = getSpillCost(); - Cost += SpillCost + ExtractCost; + InstructionCost ExtractCost = getExtractCost(ProposedToGather); + InstructionCost SpillCost = 0; + if (!NoCallInst || !IsCostSumReady) + SpillCost = getSpillCost(); + assert((!NoCallInst || getSpillCost() == 0) && "Incorrect spill cost"); + if (!IsCostSumReady) + IsCostSumReady = true; + InstructionCost InsertCost = getInsertCost(ProposedToGather); + InstructionCost Cost = CostSum + ExtractCost + SpillCost + InsertCost; + TreeCost = Cost; #ifndef NDEBUG SmallString<256> Str; - { - raw_svector_ostream OS(Str); - OS << "SLP: Spill Cost = " << SpillCost << ".\n" - << "SLP: Extract Cost = " << ExtractCost << ".\n" - << "SLP: Total Cost = " << Cost << ".\n"; - } + raw_svector_ostream OS(Str); + OS << "SLP: Spill Cost = " << SpillCost << ".\n" + << "SLP: Extract Cost = " << ExtractCost << ".\n" + << "SLP: Insert Cost = " << InsertCost << ".\n" + << "SLP: Total Cost = " << Cost << ".\n"; LLVM_DEBUG(dbgs() << Str); if (ViewSLPTree) ViewGraph(this, "SLP" + F->getName(), false, Str); #endif + if (TreeReduce && (Cost - UserCost) >= -SLPCostThreshold) { + TEVectorizableSet Vec; + if (!findSubTree(Vec, UserCost)) + return Cost; + if (!NoCallInst && Vec.size() > SLPThrottleBudget) { + std::set::iterator It = + Vec.begin(); + std::advance(It, (unsigned)SLPThrottleBudget); + Vec.erase(It, Vec.end()); + } + + for (TreeEntry *T : Vec) { + ProposedToGather.insert(T); + T->State = TreeEntry::NeedToGather; + for (Value *V : T->Scalars) { + MustGather.insert(V); + ExternalUses.erase( + llvm::remove_if(ExternalUses, + [V](ExternalUser &EU) { return EU.Scalar == V; }), + ExternalUses.end()); + } + CostSum -= T->Cost; + ExtractCost = getExtractCost(ProposedToGather); + if (!NoCallInst) + SpillCost = getSpillCost(); + assert((!NoCallInst || getSpillCost() == 0) && "Incorrect spill cost"); + InsertCost = getInsertCost(ProposedToGather); + Cost = CostSum + ExtractCost + SpillCost + InsertCost; + PartialCost = Cost - UserCost; + RemovedOperations.push_back(T); + if (PartialCost < -SLPCostThreshold) { + cutTree(ProposedToGather); + return PartialCost; + } + } + } return Cost; } @@ -4898,12 +5180,24 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { // All blocks must be scheduled before any instructions are inserted. for (auto &BSIter : BlocksSchedules) { - scheduleBlock(BSIter.second.get()); + BlockScheduling *BS = BSIter.second.get(); + // Remove all Schedule Data from all nodes that we have changed + // vectorization decision. 
+ if (!RemovedOperations.empty()) + removeFromScheduling(BS); + scheduleBlock(BS); } Builder.SetInsertPoint(&F->getEntryBlock().front()); auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); + for (std::unique_ptr &TEPtr : VectorizableTree) { + TreeEntry *Entry = TEPtr.get(); + if ((Entry->State == TreeEntry::Vectorize || + Entry->State == TreeEntry::ScatterVectorize) && !Entry->VectorizedValue) + vectorizeTree(Entry); + } + // If the vectorized tree can be rewritten in a smaller type, we truncate the // vectorized root. InstCombine will then rewrite the entire expression. We // sign extend the extracted values below. @@ -5027,7 +5321,9 @@ #ifndef NDEBUG Type *Ty = Scalar->getType(); - if (!Ty->isVoidTy()) { + // The tree might not be fully vectorized, so we don't have to + // check every user. + if (!Ty->isVoidTy() && RemovedOperations.empty()) { for (User *U : Scalar->users()) { LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); @@ -5514,6 +5810,30 @@ ReadyInsts.clear(); } +void BoUpSLP::removeFromScheduling(BlockScheduling *BS) { + bool Removed = false; + for (TreeEntry *Entry : RemovedOperations) { + ScheduleData *SD = BS->getScheduleData(Entry->Scalars[0]); + if (SD && SD->isPartOfBundle()) { + if (!Removed) { + Removed = true; + BS->resetSchedule(); + } + BS->cancelScheduling(Entry->Scalars, SD->OpValue); + } + } + if (!Removed) + return; + BS->resetSchedule(); + BS->initialFillReadyList(BS->ReadyInsts); + for (Instruction *I = BS->ScheduleStart; I != BS->ScheduleEnd; + I = I->getNextNode()) { + if (BS->ScheduleDataMap.find(I) == BS->ScheduleDataMap.end()) + continue; + BS->doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); }); + } +} + void BoUpSLP::scheduleBlock(BlockScheduling *BS) { if (!BS->ScheduleStart) return; @@ -6039,7 +6359,7 @@ R.computeMinimumValueSizes(); - InstructionCost Cost = R.getTreeCost(); + InstructionCost Cost = R.getTreeCost(true); LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n"); if (Cost < -SLPCostThreshold) { @@ -6300,6 +6620,7 @@ R.computeMinimumValueSizes(); InstructionCost Cost = R.getTreeCost(); + InstructionCost UserCost = 0; CandidateFound = true; if (CompensateUseCost) { // TODO: Use TTI's getScalarizationOverhead for sequence of inserts @@ -6356,7 +6677,13 @@ I += VF - 1; NextInst = I + 1; Changed = true; - } + } else if (SLPThrottleBudget > 0 && + R.getTreeCost(true, UserCost) < -SLPCostThreshold) { + R.vectorizeTree(); + I += VF - 1; + NextInst = I + 1; + Changed = true; + } } } Index: llvm/test/Transforms/SLPVectorizer/AArch64/gather-root.ll =================================================================== --- llvm/test/Transforms/SLPVectorizer/AArch64/gather-root.ll +++ llvm/test/Transforms/SLPVectorizer/AArch64/gather-root.ll @@ -204,11 +204,15 @@ ; MAX-COST-LABEL: @PR32038( ; MAX-COST-NEXT: entry: ; MAX-COST-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* bitcast (i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 1) to <2 x i8>*), align 1 -; MAX-COST-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> [[TMP0]], zeroinitializer ; MAX-COST-NEXT: [[P4:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 3), align 1 -; MAX-COST-NEXT: [[P5:%.*]] = icmp eq i8 [[P4]], 0 ; MAX-COST-NEXT: [[P6:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 4), align 4 -; MAX-COST-NEXT: [[P7:%.*]] = icmp eq i8 [[P6]], 0 +; MAX-COST-NEXT: [[TMP1:%.*]] = extractelement <2 x i8> [[TMP0]], i32 0 +; MAX-COST-NEXT: [[TMP2:%.*]] = insertelement 
<4 x i8> poison, i8 [[TMP1]], i32 0 +; MAX-COST-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP0]], i32 1 +; MAX-COST-NEXT: [[TMP4:%.*]] = insertelement <4 x i8> [[TMP2]], i8 [[TMP3]], i32 1 +; MAX-COST-NEXT: [[TMP5:%.*]] = insertelement <4 x i8> [[TMP4]], i8 [[P4]], i32 2 +; MAX-COST-NEXT: [[TMP6:%.*]] = insertelement <4 x i8> [[TMP5]], i8 [[P6]], i32 3 +; MAX-COST-NEXT: [[TMP7:%.*]] = icmp eq <4 x i8> [[TMP6]], zeroinitializer ; MAX-COST-NEXT: [[P8:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 5), align 1 ; MAX-COST-NEXT: [[P9:%.*]] = icmp eq i8 [[P8]], 0 ; MAX-COST-NEXT: [[P10:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 6), align 2 @@ -220,19 +224,21 @@ ; MAX-COST-NEXT: br label [[FOR_BODY:%.*]] ; MAX-COST: for.body: ; MAX-COST-NEXT: [[P17:%.*]] = phi i32 [ [[P34:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] -; MAX-COST-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 -; MAX-COST-NEXT: [[TMP3:%.*]] = insertelement <4 x i1> poison, i1 [[TMP2]], i32 0 -; MAX-COST-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 -; MAX-COST-NEXT: [[TMP5:%.*]] = insertelement <4 x i1> [[TMP3]], i1 [[TMP4]], i32 1 -; MAX-COST-NEXT: [[TMP6:%.*]] = insertelement <4 x i1> [[TMP5]], i1 [[P5]], i32 2 -; MAX-COST-NEXT: [[TMP7:%.*]] = insertelement <4 x i1> [[TMP6]], i1 [[P7]], i32 3 -; MAX-COST-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP7]], <4 x i32> , <4 x i32> +; MAX-COST-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP7]], i32 3 +; MAX-COST-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP7]], i32 0 +; MAX-COST-NEXT: [[TMP10:%.*]] = insertelement <4 x i1> poison, i1 [[TMP9]], i32 0 +; MAX-COST-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP7]], i32 1 +; MAX-COST-NEXT: [[TMP12:%.*]] = insertelement <4 x i1> [[TMP10]], i1 [[TMP11]], i32 1 +; MAX-COST-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP7]], i32 2 +; MAX-COST-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP13]], i32 2 +; MAX-COST-NEXT: [[TMP15:%.*]] = insertelement <4 x i1> [[TMP14]], i1 [[TMP8]], i32 3 +; MAX-COST-NEXT: [[TMP16:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> , <4 x i32> ; MAX-COST-NEXT: [[P27:%.*]] = select i1 [[P9]], i32 -720, i32 -80 ; MAX-COST-NEXT: [[P29:%.*]] = select i1 [[P11]], i32 -720, i32 -80 -; MAX-COST-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP8]]) -; MAX-COST-NEXT: [[TMP10:%.*]] = add i32 [[TMP9]], [[P27]] -; MAX-COST-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], [[P29]] -; MAX-COST-NEXT: [[OP_EXTRA:%.*]] = add i32 [[TMP11]], -5 +; MAX-COST-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP16]]) +; MAX-COST-NEXT: [[TMP18:%.*]] = add i32 [[TMP17]], [[P27]] +; MAX-COST-NEXT: [[TMP19:%.*]] = add i32 [[TMP18]], [[P29]] +; MAX-COST-NEXT: [[OP_EXTRA:%.*]] = add i32 [[TMP19]], -5 ; MAX-COST-NEXT: [[P31:%.*]] = select i1 [[P13]], i32 -720, i32 -80 ; MAX-COST-NEXT: [[P32:%.*]] = add i32 [[OP_EXTRA]], [[P31]] ; MAX-COST-NEXT: [[P33:%.*]] = select i1 [[P15]], i32 -720, i32 -80 Index: llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll =================================================================== --- llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll +++ llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll @@ -7,49 +7,65 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x i32> [ [[TMP15:%.*]], [[LOOP]] ], [ zeroinitializer, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x i32> 
[[TMP1]], <2 x i32> poison, <8 x i32> -; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i32> [[SHUFFLE]], i32 1 -; CHECK-NEXT: [[TMP3:%.*]] = add <8 x i32> [[SHUFFLE]], -; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP3]]) -; CHECK-NEXT: [[OP_EXTRA:%.*]] = and i32 [[TMP4]], [[TMP0:%.*]] -; CHECK-NEXT: [[OP_EXTRA1:%.*]] = and i32 [[OP_EXTRA]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA2:%.*]] = and i32 [[OP_EXTRA1]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA3:%.*]] = and i32 [[OP_EXTRA2]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA4:%.*]] = and i32 [[OP_EXTRA3]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA5:%.*]] = and i32 [[OP_EXTRA4]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA6:%.*]] = and i32 [[OP_EXTRA5]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA7:%.*]] = and i32 [[OP_EXTRA6]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA8:%.*]] = and i32 [[OP_EXTRA7]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA9:%.*]] = and i32 [[OP_EXTRA8]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA10:%.*]] = and i32 [[OP_EXTRA9]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA11:%.*]] = and i32 [[OP_EXTRA10]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA12:%.*]] = and i32 [[OP_EXTRA11]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA13:%.*]] = and i32 [[OP_EXTRA12]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA14:%.*]] = and i32 [[OP_EXTRA13]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA15:%.*]] = and i32 [[OP_EXTRA14]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA16:%.*]] = and i32 [[OP_EXTRA15]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA17:%.*]] = and i32 [[OP_EXTRA16]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA18:%.*]] = and i32 [[OP_EXTRA17]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA19:%.*]] = and i32 [[OP_EXTRA18]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA20:%.*]] = and i32 [[OP_EXTRA19]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA21:%.*]] = and i32 [[OP_EXTRA20]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA22:%.*]] = and i32 [[OP_EXTRA21]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA23:%.*]] = and i32 [[OP_EXTRA22]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA24:%.*]] = and i32 [[OP_EXTRA23]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA25:%.*]] = and i32 [[OP_EXTRA24]], [[TMP0]] -; CHECK-NEXT: [[OP_EXTRA26:%.*]] = and i32 [[OP_EXTRA25]], [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> poison, i32 [[OP_EXTRA26]], i32 0 -; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> [[TMP5]], i32 14910, i32 1 -; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i32> poison, i32 [[TMP2]], i32 0 -; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP2]], i32 1 -; CHECK-NEXT: [[TMP9:%.*]] = and <2 x i32> [[TMP6]], [[TMP8]] -; CHECK-NEXT: [[TMP10:%.*]] = add <2 x i32> [[TMP6]], [[TMP8]] -; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> [[TMP10]], <2 x i32> -; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x i32> [[TMP11]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP12]], i32 0 -; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i32> [[TMP11]], i32 1 -; CHECK-NEXT: [[TMP15]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP14]], i32 1 +; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x i32> [ [[TMP19:%.*]], [[LOOP]] ], [ zeroinitializer, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0 +; CHECK-NEXT: [[VAL_0:%.*]] = add i32 [[TMP2]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1 +; CHECK-NEXT: [[VAL_1:%.*]] = and i32 [[TMP3]], [[VAL_0]] +; CHECK-NEXT: [[VAL_2:%.*]] = and i32 [[VAL_1]], [[TMP0:%.*]] +; CHECK-NEXT: [[VAL_3:%.*]] = and i32 [[VAL_2]], [[TMP0]] +; CHECK-NEXT: [[VAL_4:%.*]] = and i32 [[VAL_3]], [[TMP0]] +; CHECK-NEXT: [[VAL_5:%.*]] = and i32 
[[VAL_4]], [[TMP0]] +; CHECK-NEXT: [[VAL_6:%.*]] = add i32 [[TMP3]], 55 +; CHECK-NEXT: [[VAL_7:%.*]] = and i32 [[VAL_5]], [[VAL_6]] +; CHECK-NEXT: [[VAL_8:%.*]] = and i32 [[VAL_7]], [[TMP0]] +; CHECK-NEXT: [[VAL_9:%.*]] = and i32 [[VAL_8]], [[TMP0]] +; CHECK-NEXT: [[VAL_10:%.*]] = and i32 [[VAL_9]], [[TMP0]] +; CHECK-NEXT: [[VAL_11:%.*]] = add i32 [[TMP3]], 285 +; CHECK-NEXT: [[VAL_12:%.*]] = and i32 [[VAL_10]], [[VAL_11]] +; CHECK-NEXT: [[VAL_13:%.*]] = and i32 [[VAL_12]], [[TMP0]] +; CHECK-NEXT: [[VAL_14:%.*]] = and i32 [[VAL_13]], [[TMP0]] +; CHECK-NEXT: [[VAL_15:%.*]] = and i32 [[VAL_14]], [[TMP0]] +; CHECK-NEXT: [[VAL_16:%.*]] = and i32 [[VAL_15]], [[TMP0]] +; CHECK-NEXT: [[VAL_17:%.*]] = and i32 [[VAL_16]], [[TMP0]] +; CHECK-NEXT: [[VAL_18:%.*]] = add i32 [[TMP3]], 1240 +; CHECK-NEXT: [[VAL_19:%.*]] = and i32 [[VAL_17]], [[VAL_18]] +; CHECK-NEXT: [[VAL_20:%.*]] = add i32 [[TMP3]], 1496 +; CHECK-NEXT: [[VAL_21:%.*]] = and i32 [[VAL_19]], [[VAL_20]] +; CHECK-NEXT: [[VAL_22:%.*]] = and i32 [[VAL_21]], [[TMP0]] +; CHECK-NEXT: [[VAL_23:%.*]] = and i32 [[VAL_22]], [[TMP0]] +; CHECK-NEXT: [[VAL_24:%.*]] = and i32 [[VAL_23]], [[TMP0]] +; CHECK-NEXT: [[VAL_25:%.*]] = and i32 [[VAL_24]], [[TMP0]] +; CHECK-NEXT: [[VAL_26:%.*]] = and i32 [[VAL_25]], [[TMP0]] +; CHECK-NEXT: [[VAL_27:%.*]] = and i32 [[VAL_26]], [[TMP0]] +; CHECK-NEXT: [[VAL_28:%.*]] = and i32 [[VAL_27]], [[TMP0]] +; CHECK-NEXT: [[VAL_29:%.*]] = and i32 [[VAL_28]], [[TMP0]] +; CHECK-NEXT: [[VAL_30:%.*]] = and i32 [[VAL_29]], [[TMP0]] +; CHECK-NEXT: [[VAL_31:%.*]] = and i32 [[VAL_30]], [[TMP0]] +; CHECK-NEXT: [[VAL_32:%.*]] = and i32 [[VAL_31]], [[TMP0]] +; CHECK-NEXT: [[VAL_33:%.*]] = and i32 [[VAL_32]], [[TMP0]] +; CHECK-NEXT: [[VAL_34:%.*]] = add i32 [[TMP3]], 8555 +; CHECK-NEXT: [[VAL_35:%.*]] = and i32 [[VAL_33]], [[VAL_34]] +; CHECK-NEXT: [[VAL_36:%.*]] = and i32 [[VAL_35]], [[TMP0]] +; CHECK-NEXT: [[VAL_37:%.*]] = and i32 [[VAL_36]], [[TMP0]] +; CHECK-NEXT: [[VAL_38:%.*]] = and i32 [[VAL_37]], [[TMP0]] +; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x i32> poison, i32 [[TMP3]], i32 0 +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> [[TMP4]], i32 [[TMP3]], i32 1 +; CHECK-NEXT: [[TMP6:%.*]] = add <2 x i32> [[TMP5]], +; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[TMP6]], i32 0 +; CHECK-NEXT: [[VAL_40:%.*]] = and i32 [[VAL_38]], [[TMP7]] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i32> [[TMP6]], i32 1 +; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[VAL_40]], i32 0 +; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i32> [[TMP9]], i32 14910, i32 1 +; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> poison, i32 [[TMP8]], i32 0 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP11]], i32 [[TMP3]], i32 1 +; CHECK-NEXT: [[TMP13:%.*]] = and <2 x i32> [[TMP10]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = add <2 x i32> [[TMP10]], [[TMP12]] +; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <2 x i32> [[TMP13]], <2 x i32> [[TMP14]], <2 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0 +; CHECK-NEXT: [[TMP17:%.*]] = insertelement <2 x i32> poison, i32 [[TMP16]], i32 0 +; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1 +; CHECK-NEXT: [[TMP19]] = insertelement <2 x i32> [[TMP17]], i32 [[TMP18]], i32 1 ; CHECK-NEXT: br label [[LOOP]] ; ; FORCE_REDUCTION-LABEL: @Test( Index: llvm/test/Transforms/SLPVectorizer/X86/powof2div.ll =================================================================== --- llvm/test/Transforms/SLPVectorizer/X86/powof2div.ll +++ 
llvm/test/Transforms/SLPVectorizer/X86/powof2div.ll @@ -60,35 +60,34 @@ define void @powof2div_nonuniform(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c){ ; AVX1-LABEL: @powof2div_nonuniform( ; AVX1-NEXT: entry: -; AVX1-NEXT: [[TMP0:%.*]] = load i32, i32* [[B:%.*]], align 4 -; AVX1-NEXT: [[TMP1:%.*]] = load i32, i32* [[C:%.*]], align 4 -; AVX1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]] -; AVX1-NEXT: [[DIV:%.*]] = sdiv i32 [[ADD]], 2 -; AVX1-NEXT: store i32 [[DIV]], i32* [[A:%.*]], align 4 -; AVX1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 1 -; AVX1-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX3]], align 4 -; AVX1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 1 -; AVX1-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4 -; AVX1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP3]], [[TMP2]] -; AVX1-NEXT: [[DIV6:%.*]] = sdiv i32 [[ADD5]], 4 -; AVX1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1 -; AVX1-NEXT: store i32 [[DIV6]], i32* [[ARRAYIDX7]], align 4 +; AVX1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1 +; AVX1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 1 ; AVX1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2 -; AVX1-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4 ; AVX1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 2 -; AVX1-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4 -; AVX1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -; AVX1-NEXT: [[DIV11:%.*]] = sdiv i32 [[ADD10]], 8 -; AVX1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2 -; AVX1-NEXT: store i32 [[DIV11]], i32* [[ARRAYIDX12]], align 4 ; AVX1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3 -; AVX1-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4 +; AVX1-NEXT: [[TMP0:%.*]] = bitcast i32* [[B]] to <4 x i32>* +; AVX1-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4 ; AVX1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 3 -; AVX1-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX14]], align 4 -; AVX1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP7]], [[TMP6]] -; AVX1-NEXT: [[DIV16:%.*]] = sdiv i32 [[ADD15]], 16 +; AVX1-NEXT: [[TMP2:%.*]] = bitcast i32* [[C]] to <4 x i32>* +; AVX1-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4 +; AVX1-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP1]] +; AVX1-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP4]], i32 0 +; AVX1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 2 +; AVX1-NEXT: [[TMP6:%.*]] = extractelement <4 x i32> [[TMP4]], i32 1 +; AVX1-NEXT: [[DIV6:%.*]] = sdiv i32 [[TMP6]], 4 +; AVX1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 1 +; AVX1-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP4]], i32 2 +; AVX1-NEXT: [[DIV11:%.*]] = sdiv i32 [[TMP7]], 8 +; AVX1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2 +; AVX1-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[TMP4]], i32 3 +; AVX1-NEXT: [[DIV16:%.*]] = sdiv i32 [[TMP8]], 16 ; AVX1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3 -; AVX1-NEXT: store i32 [[DIV16]], i32* [[ARRAYIDX17]], align 4 +; AVX1-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> poison, i32 [[DIV]], i32 0 +; AVX1-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[DIV6]], i32 1 +; AVX1-NEXT: [[TMP11:%.*]] = 
insertelement <4 x i32> [[TMP10]], i32 [[DIV11]], i32 2 +; AVX1-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[DIV16]], i32 3 +; AVX1-NEXT: [[TMP13:%.*]] = bitcast i32* [[A]] to <4 x i32>* +; AVX1-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* [[TMP13]], align 4 ; AVX1-NEXT: ret void ; ; AVX2-LABEL: @powof2div_nonuniform( Index: llvm/test/Transforms/SLPVectorizer/X86/pr47629-inseltpoison.ll =================================================================== --- llvm/test/Transforms/SLPVectorizer/X86/pr47629-inseltpoison.ll +++ llvm/test/Transforms/SLPVectorizer/X86/pr47629-inseltpoison.ll @@ -175,102 +175,49 @@ ; ; AVX-LABEL: @gather_load_3( ; AVX-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1 -; AVX-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1 -; AVX-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11 +; AVX-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11 +; AVX-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, [[TBAA0]] +; AVX-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 4 ; AVX-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], 2 -; AVX-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 2 -; AVX-NEXT: store i32 [[TMP8]], i32* [[TMP5]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 4 -; AVX-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP12:%.*]] = add i32 [[TMP11]], 3 -; AVX-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 3 -; AVX-NEXT: store i32 [[TMP12]], i32* [[TMP9]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 15 -; AVX-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP16:%.*]] = add i32 [[TMP15]], 4 -; AVX-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 4 -; AVX-NEXT: store i32 [[TMP16]], i32* [[TMP13]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 18 -; AVX-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP20:%.*]] = add i32 [[TMP19]], 1 -; AVX-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5 -; AVX-NEXT: store i32 [[TMP20]], i32* [[TMP17]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9 -; AVX-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP24:%.*]] = add i32 [[TMP23]], 2 -; AVX-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6 -; AVX-NEXT: store i32 [[TMP24]], i32* [[TMP21]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6 -; AVX-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP28:%.*]] = add i32 [[TMP27]], 3 -; AVX-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7 -; AVX-NEXT: store i32 [[TMP28]], i32* [[TMP25]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21 -; AVX-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4, [[TBAA0]] -; AVX-NEXT: [[TMP32:%.*]] = add i32 [[TMP31]], 4 -; AVX-NEXT: store i32 [[TMP32]], i32* [[TMP29]], align 4, [[TBAA0]] +; AVX-NEXT: [[TMP8:%.*]] = 
getelementptr inbounds i32, i32* [[TMP1]], i64 15 +; AVX-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4, [[TBAA0]] +; AVX-NEXT: [[TMP10:%.*]] = insertelement <4 x i32*> poison, i32* [[TMP1]], i32 0 +; AVX-NEXT: [[TMP11:%.*]] = shufflevector <4 x i32*> [[TMP10]], <4 x i32*> undef, <4 x i32> zeroinitializer +; AVX-NEXT: [[TMP12:%.*]] = getelementptr i32, <4 x i32*> [[TMP11]], <4 x i64> +; AVX-NEXT: [[TMP13:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP12]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]] +; AVX-NEXT: [[TMP14:%.*]] = shufflevector <4 x i32> [[TMP13]], <4 x i32> undef, <8 x i32> +; AVX-NEXT: [[TMP15:%.*]] = insertelement <8 x i32> poison, i32 [[TMP3]], i32 0 +; AVX-NEXT: [[TMP16:%.*]] = insertelement <8 x i32> [[TMP15]], i32 [[TMP5]], i32 1 +; AVX-NEXT: [[TMP17:%.*]] = insertelement <8 x i32> [[TMP16]], i32 [[TMP7]], i32 2 +; AVX-NEXT: [[TMP18:%.*]] = insertelement <8 x i32> [[TMP17]], i32 [[TMP9]], i32 3 +; AVX-NEXT: [[TMP19:%.*]] = shufflevector <8 x i32> [[TMP18]], <8 x i32> [[TMP14]], <8 x i32> +; AVX-NEXT: [[TMP20:%.*]] = add <8 x i32> [[TMP19]], +; AVX-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP0:%.*]] to <8 x i32>* +; AVX-NEXT: store <8 x i32> [[TMP20]], <8 x i32>* [[TMP21]], align 4, [[TBAA0]] ; AVX-NEXT: ret void ; ; AVX2-LABEL: @gather_load_3( ; AVX2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]] -; AVX2-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1 -; AVX2-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]] +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11 +; AVX2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, [[TBAA0]] ; AVX2-NEXT: [[TMP6:%.*]] = insertelement <4 x i32*> poison, i32* [[TMP1]], i32 0 ; AVX2-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32*> [[TMP6]], <4 x i32*> undef, <4 x i32> zeroinitializer -; AVX2-NEXT: [[TMP8:%.*]] = getelementptr i32, <4 x i32*> [[TMP7]], <4 x i64> +; AVX2-NEXT: [[TMP8:%.*]] = getelementptr i32, <4 x i32*> [[TMP7]], <4 x i64> ; AVX2-NEXT: [[TMP9:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP8]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]] -; AVX2-NEXT: [[TMP10:%.*]] = add <4 x i32> [[TMP9]], -; AVX2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5 -; AVX2-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>* -; AVX2-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* [[TMP12]], align 4, [[TBAA0]] -; AVX2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9 -; AVX2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4, [[TBAA0]] -; AVX2-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], 2 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6 -; AVX2-NEXT: store i32 [[TMP15]], i32* [[TMP11]], align 4, [[TBAA0]] -; AVX2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6 -; AVX2-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4, [[TBAA0]] -; AVX2-NEXT: [[TMP19:%.*]] = add i32 [[TMP18]], 3 -; AVX2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7 -; AVX2-NEXT: store i32 [[TMP19]], i32* [[TMP16]], align 4, [[TBAA0]] -; AVX2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21 -; AVX2-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4, [[TBAA0]] -; AVX2-NEXT: [[TMP23:%.*]] = add i32 [[TMP22]], 4 -; AVX2-NEXT: store i32 [[TMP23]], i32* [[TMP20]], align 4, [[TBAA0]] +; AVX2-NEXT: [[TMP10:%.*]] = shufflevector <4 x i32> 
[[TMP9]], <4 x i32> undef, <8 x i32> +; AVX2-NEXT: [[TMP11:%.*]] = insertelement <2 x i32*> poison, i32* [[TMP1]], i32 0 +; AVX2-NEXT: [[TMP12:%.*]] = shufflevector <2 x i32*> [[TMP11]], <2 x i32*> undef, <2 x i32> zeroinitializer +; AVX2-NEXT: [[TMP13:%.*]] = getelementptr i32, <2 x i32*> [[TMP12]], <2 x i64> +; AVX2-NEXT: [[TMP14:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> [[TMP13]], i32 4, <2 x i1> , <2 x i32> undef), [[TBAA0]] +; AVX2-NEXT: [[TMP15:%.*]] = shufflevector <2 x i32> [[TMP14]], <2 x i32> undef, <8 x i32> +; AVX2-NEXT: [[TMP16:%.*]] = insertelement <8 x i32> poison, i32 [[TMP3]], i32 0 +; AVX2-NEXT: [[TMP17:%.*]] = insertelement <8 x i32> [[TMP16]], i32 [[TMP5]], i32 1 +; AVX2-NEXT: [[TMP18:%.*]] = shufflevector <8 x i32> [[TMP17]], <8 x i32> [[TMP10]], <8 x i32> +; AVX2-NEXT: [[TMP19:%.*]] = shufflevector <8 x i32> [[TMP18]], <8 x i32> [[TMP15]], <8 x i32> +; AVX2-NEXT: [[TMP20:%.*]] = add <8 x i32> [[TMP19]], +; AVX2-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP0:%.*]] to <8 x i32>* +; AVX2-NEXT: store <8 x i32> [[TMP20]], <8 x i32>* [[TMP21]], align 4, [[TBAA0]] ; AVX2-NEXT: ret void -; -; AVX512-LABEL: @gather_load_3( -; AVX512-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]] -; AVX512-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1 -; AVX512-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]] -; AVX512-NEXT: [[TMP6:%.*]] = insertelement <4 x i32*> poison, i32* [[TMP1]], i32 0 -; AVX512-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32*> [[TMP6]], <4 x i32*> undef, <4 x i32> zeroinitializer -; AVX512-NEXT: [[TMP8:%.*]] = getelementptr i32, <4 x i32*> [[TMP7]], <4 x i64> -; AVX512-NEXT: [[TMP9:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP8]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]] -; AVX512-NEXT: [[TMP10:%.*]] = add <4 x i32> [[TMP9]], -; AVX512-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5 -; AVX512-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>* -; AVX512-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* [[TMP12]], align 4, [[TBAA0]] -; AVX512-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9 -; AVX512-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4, [[TBAA0]] -; AVX512-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], 2 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6 -; AVX512-NEXT: store i32 [[TMP15]], i32* [[TMP11]], align 4, [[TBAA0]] -; AVX512-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6 -; AVX512-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4, [[TBAA0]] -; AVX512-NEXT: [[TMP19:%.*]] = add i32 [[TMP18]], 3 -; AVX512-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7 -; AVX512-NEXT: store i32 [[TMP19]], i32* [[TMP16]], align 4, [[TBAA0]] -; AVX512-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21 -; AVX512-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4, [[TBAA0]] -; AVX512-NEXT: [[TMP23:%.*]] = add i32 [[TMP22]], 4 -; AVX512-NEXT: store i32 [[TMP23]], i32* [[TMP20]], align 4, [[TBAA0]] -; AVX512-NEXT: ret void ; %3 = load i32, i32* %1, align 4, !tbaa !2 %4 = add i32 %3, 1 @@ -356,103 +303,50 @@ ; SSE-NEXT: ret void ; ; AVX-LABEL: @gather_load_4( -; AVX-NEXT: [[T5:%.*]] = getelementptr inbounds i32, i32* [[T0:%.*]], i64 1 ; AVX-NEXT: [[T6:%.*]] = getelementptr inbounds i32, i32* [[T1:%.*]], i64 11 -; AVX-NEXT: [[T9:%.*]] = getelementptr inbounds i32, 
i32* [[T0]], i64 2 ; AVX-NEXT: [[T10:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 4 -; AVX-NEXT: [[T13:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 3 ; AVX-NEXT: [[T14:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 15 -; AVX-NEXT: [[T17:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 4 -; AVX-NEXT: [[T18:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 18 -; AVX-NEXT: [[T21:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 5 -; AVX-NEXT: [[T22:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 9 -; AVX-NEXT: [[T25:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 6 -; AVX-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6 -; AVX-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7 -; AVX-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21 +; AVX-NEXT: [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* [[T1]], i32 0 +; AVX-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32*> [[TMP1]], <4 x i32*> undef, <4 x i32> zeroinitializer +; AVX-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x i32*> [[TMP2]], <4 x i64> ; AVX-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]] ; AVX-NEXT: [[T7:%.*]] = load i32, i32* [[T6]], align 4, [[TBAA0]] ; AVX-NEXT: [[T11:%.*]] = load i32, i32* [[T10]], align 4, [[TBAA0]] ; AVX-NEXT: [[T15:%.*]] = load i32, i32* [[T14]], align 4, [[TBAA0]] -; AVX-NEXT: [[T19:%.*]] = load i32, i32* [[T18]], align 4, [[TBAA0]] -; AVX-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]] -; AVX-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]] -; AVX-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]] -; AVX-NEXT: [[T4:%.*]] = add i32 [[T3]], 1 -; AVX-NEXT: [[T8:%.*]] = add i32 [[T7]], 2 -; AVX-NEXT: [[T12:%.*]] = add i32 [[T11]], 3 -; AVX-NEXT: [[T16:%.*]] = add i32 [[T15]], 4 -; AVX-NEXT: [[T20:%.*]] = add i32 [[T19]], 1 -; AVX-NEXT: [[T24:%.*]] = add i32 [[T23]], 2 -; AVX-NEXT: [[T28:%.*]] = add i32 [[T27]], 3 -; AVX-NEXT: [[T32:%.*]] = add i32 [[T31]], 4 -; AVX-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]] -; AVX-NEXT: store i32 [[T8]], i32* [[T5]], align 4, [[TBAA0]] -; AVX-NEXT: store i32 [[T12]], i32* [[T9]], align 4, [[TBAA0]] -; AVX-NEXT: store i32 [[T16]], i32* [[T13]], align 4, [[TBAA0]] -; AVX-NEXT: store i32 [[T20]], i32* [[T17]], align 4, [[TBAA0]] -; AVX-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]] -; AVX-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]] -; AVX-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]] +; AVX-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]] +; AVX-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> undef, <8 x i32> +; AVX-NEXT: [[TMP6:%.*]] = insertelement <8 x i32> poison, i32 [[T3]], i32 0 +; AVX-NEXT: [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 [[T7]], i32 1 +; AVX-NEXT: [[TMP8:%.*]] = insertelement <8 x i32> [[TMP7]], i32 [[T11]], i32 2 +; AVX-NEXT: [[TMP9:%.*]] = insertelement <8 x i32> [[TMP8]], i32 [[T15]], i32 3 +; AVX-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP9]], <8 x i32> [[TMP5]], <8 x i32> +; AVX-NEXT: [[TMP11:%.*]] = add <8 x i32> [[TMP10]], +; AVX-NEXT: [[TMP12:%.*]] = bitcast i32* [[T0:%.*]] to <8 x i32>* +; AVX-NEXT: store <8 x i32> [[TMP11]], <8 x i32>* [[TMP12]], align 4, [[TBAA0]] ; AVX-NEXT: ret void ; ; AVX2-LABEL: @gather_load_4( -; AVX2-NEXT: [[T5:%.*]] = getelementptr inbounds i32, i32* [[T0:%.*]], i64 1 -; AVX2-NEXT: [[TMP1:%.*]] = insertelement <4 
x i32*> poison, i32* [[T1:%.*]], i32 0
+; AVX2-NEXT: [[T6:%.*]] = getelementptr inbounds i32, i32* [[T1:%.*]], i64 11
+; AVX2-NEXT: [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* [[T1]], i32 0
 ; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32*> [[TMP1]], <4 x i32*> undef, <4 x i32> zeroinitializer
-; AVX2-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x i32*> [[TMP2]], <4 x i64>
-; AVX2-NEXT: [[T21:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 5
-; AVX2-NEXT: [[T22:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 9
-; AVX2-NEXT: [[T25:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 6
-; AVX2-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
-; AVX2-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
-; AVX2-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
+; AVX2-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x i32*> [[TMP2]], <4 x i64>
+; AVX2-NEXT: [[TMP4:%.*]] = insertelement <2 x i32*> poison, i32* [[T1]], i32 0
+; AVX2-NEXT: [[TMP5:%.*]] = shufflevector <2 x i32*> [[TMP4]], <2 x i32*> undef, <2 x i32> zeroinitializer
+; AVX2-NEXT: [[TMP6:%.*]] = getelementptr i32, <2 x i32*> [[TMP5]], <2 x i64>
 ; AVX2-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
-; AVX2-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
-; AVX2-NEXT: [[TMP5:%.*]] = add <4 x i32> [[TMP4]],
-; AVX2-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
-; AVX2-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
-; AVX2-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; AVX2-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP6:%.*]] = bitcast i32* [[T5]] to <4 x i32>*
-; AVX2-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4, [[TBAA0]]
-; AVX2-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
-; AVX2-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
-; AVX2-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[T7:%.*]] = load i32, i32* [[T6]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP7:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
+; AVX2-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> undef, <8 x i32>
+; AVX2-NEXT: [[TMP9:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> [[TMP6]], i32 4, <2 x i1> , <2 x i32> undef), [[TBAA0]]
+; AVX2-NEXT: [[TMP10:%.*]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> undef, <8 x i32>
+; AVX2-NEXT: [[TMP11:%.*]] = insertelement <8 x i32> poison, i32 [[T3]], i32 0
+; AVX2-NEXT: [[TMP12:%.*]] = insertelement <8 x i32> [[TMP11]], i32 [[T7]], i32 1
+; AVX2-NEXT: [[TMP13:%.*]] = shufflevector <8 x i32> [[TMP12]], <8 x i32> [[TMP8]], <8 x i32>
+; AVX2-NEXT: [[TMP14:%.*]] = shufflevector <8 x i32> [[TMP13]], <8 x i32> [[TMP10]], <8 x i32>
+; AVX2-NEXT: [[TMP15:%.*]] = add <8 x i32> [[TMP14]],
+; AVX2-NEXT: [[TMP16:%.*]] = bitcast i32* [[T0:%.*]] to <8 x i32>*
+; AVX2-NEXT: store <8 x i32> [[TMP15]], <8 x i32>* [[TMP16]], align 4, [[TBAA0]]
 ; AVX2-NEXT: ret void
-;
-; AVX512-LABEL: @gather_load_4(
-; AVX512-NEXT: [[T5:%.*]] = getelementptr inbounds i32, i32* [[T0:%.*]], i64 1
-; AVX512-NEXT: [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* [[T1:%.*]], i32 0
-; AVX512-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32*> [[TMP1]], <4 x i32*> undef, <4 x i32> zeroinitializer
-; AVX512-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x i32*> [[TMP2]], <4 x i64>
-; AVX512-NEXT: [[T21:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 5
-; AVX512-NEXT: [[T22:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 9
-; AVX512-NEXT: [[T25:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 6
-; AVX512-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
-; AVX512-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
-; AVX512-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
-; AVX512-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
-; AVX512-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
-; AVX512-NEXT: [[TMP5:%.*]] = add <4 x i32> [[TMP4]],
-; AVX512-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
-; AVX512-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
-; AVX512-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; AVX512-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP6:%.*]] = bitcast i32* [[T5]] to <4 x i32>*
-; AVX512-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4, [[TBAA0]]
-; AVX512-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
-; AVX512-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
-; AVX512-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
-; AVX512-NEXT: ret void
 ;
 %t5 = getelementptr inbounds i32, i32* %t0, i64 1
 %t6 = getelementptr inbounds i32, i32* %t1, i64 11
Index: llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
===================================================================
--- llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
+++ llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
@@ -175,102 +175,49 @@
 ;
 ; AVX-LABEL: @gather_load_3(
 ; AVX-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1
-; AVX-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
-; AVX-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11
+; AVX-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11
+; AVX-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 4
 ; AVX-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], 2
-; AVX-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 2
-; AVX-NEXT: store i32 [[TMP8]], i32* [[TMP5]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 4
-; AVX-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP12:%.*]] = add i32 [[TMP11]], 3
-; AVX-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 3
-; AVX-NEXT: store i32 [[TMP12]], i32* [[TMP9]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 15
-; AVX-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP16:%.*]] = add i32 [[TMP15]], 4
-; AVX-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 4
-; AVX-NEXT: store i32 [[TMP16]], i32* [[TMP13]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 18
-; AVX-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP20:%.*]] = add i32 [[TMP19]], 1
-; AVX-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5
-; AVX-NEXT: store i32 [[TMP20]], i32* [[TMP17]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9
-; AVX-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP24:%.*]] = add i32 [[TMP23]], 2
-; AVX-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6
-; AVX-NEXT: store i32 [[TMP24]], i32* [[TMP21]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
-; AVX-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP28:%.*]] = add i32 [[TMP27]], 3
-; AVX-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7
-; AVX-NEXT: store i32 [[TMP28]], i32* [[TMP25]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21
-; AVX-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4, [[TBAA0]]
-; AVX-NEXT: [[TMP32:%.*]] = add i32 [[TMP31]], 4
-; AVX-NEXT: store i32 [[TMP32]], i32* [[TMP29]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 15
+; AVX-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP10:%.*]] = insertelement <4 x i32*> poison, i32* [[TMP1]], i32 0
+; AVX-NEXT: [[TMP11:%.*]] = shufflevector <4 x i32*> [[TMP10]], <4 x i32*> undef, <4 x i32> zeroinitializer
+; AVX-NEXT: [[TMP12:%.*]] = getelementptr i32, <4 x i32*> [[TMP11]], <4 x i64>
+; AVX-NEXT: [[TMP13:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP12]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
+; AVX-NEXT: [[TMP14:%.*]] = shufflevector <4 x i32> [[TMP13]], <4 x i32> undef, <8 x i32>
+; AVX-NEXT: [[TMP15:%.*]] = insertelement <8 x i32> poison, i32 [[TMP3]], i32 0
+; AVX-NEXT: [[TMP16:%.*]] = insertelement <8 x i32> [[TMP15]], i32 [[TMP5]], i32 1
+; AVX-NEXT: [[TMP17:%.*]] = insertelement <8 x i32> [[TMP16]], i32 [[TMP7]], i32 2
+; AVX-NEXT: [[TMP18:%.*]] = insertelement <8 x i32> [[TMP17]], i32 [[TMP9]], i32 3
+; AVX-NEXT: [[TMP19:%.*]] = shufflevector <8 x i32> [[TMP18]], <8 x i32> [[TMP14]], <8 x i32>
+; AVX-NEXT: [[TMP20:%.*]] = add <8 x i32> [[TMP19]],
+; AVX-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP0:%.*]] to <8 x i32>*
+; AVX-NEXT: store <8 x i32> [[TMP20]], <8 x i32>* [[TMP21]], align 4, [[TBAA0]]
 ; AVX-NEXT: ret void
 ;
 ; AVX2-LABEL: @gather_load_3(
 ; AVX2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1
-; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
-; AVX2-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11
+; AVX2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, [[TBAA0]]
 ; AVX2-NEXT: [[TMP6:%.*]] = insertelement <4 x i32*> poison, i32* [[TMP1]], i32 0
 ; AVX2-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32*> [[TMP6]], <4 x i32*> undef, <4 x i32> zeroinitializer
-; AVX2-NEXT: [[TMP8:%.*]] = getelementptr i32, <4 x i32*> [[TMP7]], <4 x i64>
+; AVX2-NEXT: [[TMP8:%.*]] = getelementptr i32, <4 x i32*> [[TMP7]], <4 x i64>
 ; AVX2-NEXT: [[TMP9:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP8]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
-; AVX2-NEXT: [[TMP10:%.*]] = add <4 x i32> [[TMP9]],
-; AVX2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5
-; AVX2-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
-; AVX2-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* [[TMP12]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9
-; AVX2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], 2
-; AVX2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6
-; AVX2-NEXT: store i32 [[TMP15]], i32* [[TMP11]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
-; AVX2-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP19:%.*]] = add i32 [[TMP18]], 3
-; AVX2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7
-; AVX2-NEXT: store i32 [[TMP19]], i32* [[TMP16]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21
-; AVX2-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP23:%.*]] = add i32 [[TMP22]], 4
-; AVX2-NEXT: store i32 [[TMP23]], i32* [[TMP20]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP10:%.*]] = shufflevector <4 x i32> [[TMP9]], <4 x i32> undef, <8 x i32>
+; AVX2-NEXT: [[TMP11:%.*]] = insertelement <2 x i32*> poison, i32* [[TMP1]], i32 0
+; AVX2-NEXT: [[TMP12:%.*]] = shufflevector <2 x i32*> [[TMP11]], <2 x i32*> undef, <2 x i32> zeroinitializer
+; AVX2-NEXT: [[TMP13:%.*]] = getelementptr i32, <2 x i32*> [[TMP12]], <2 x i64>
+; AVX2-NEXT: [[TMP14:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> [[TMP13]], i32 4, <2 x i1> , <2 x i32> undef), [[TBAA0]]
+; AVX2-NEXT: [[TMP15:%.*]] = shufflevector <2 x i32> [[TMP14]], <2 x i32> undef, <8 x i32>
+; AVX2-NEXT: [[TMP16:%.*]] = insertelement <8 x i32> poison, i32 [[TMP3]], i32 0
+; AVX2-NEXT: [[TMP17:%.*]] = insertelement <8 x i32> [[TMP16]], i32 [[TMP5]], i32 1
+; AVX2-NEXT: [[TMP18:%.*]] = shufflevector <8 x i32> [[TMP17]], <8 x i32> [[TMP10]], <8 x i32>
+; AVX2-NEXT: [[TMP19:%.*]] = shufflevector <8 x i32> [[TMP18]], <8 x i32> [[TMP15]], <8 x i32>
+; AVX2-NEXT: [[TMP20:%.*]] = add <8 x i32> [[TMP19]],
+; AVX2-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP0:%.*]] to <8 x i32>*
+; AVX2-NEXT: store <8 x i32> [[TMP20]], <8 x i32>* [[TMP21]], align 4, [[TBAA0]]
 ; AVX2-NEXT: ret void
-;
-; AVX512-LABEL: @gather_load_3(
-; AVX512-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1
-; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
-; AVX512-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP6:%.*]] = insertelement <4 x i32*> poison, i32* [[TMP1]], i32 0
-; AVX512-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32*> [[TMP6]], <4 x i32*> undef, <4 x i32> zeroinitializer
-; AVX512-NEXT: [[TMP8:%.*]] = getelementptr i32, <4 x i32*> [[TMP7]], <4 x i64>
-; AVX512-NEXT: [[TMP9:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP8]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
-; AVX512-NEXT: [[TMP10:%.*]] = add <4 x i32> [[TMP9]],
-; AVX512-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5
-; AVX512-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
-; AVX512-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* [[TMP12]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9
-; AVX512-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], 2
-; AVX512-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6
-; AVX512-NEXT: store i32 [[TMP15]], i32* [[TMP11]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
-; AVX512-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP19:%.*]] = add i32 [[TMP18]], 3
-; AVX512-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7
-; AVX512-NEXT: store i32 [[TMP19]], i32* [[TMP16]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21
-; AVX512-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP23:%.*]] = add i32 [[TMP22]], 4
-; AVX512-NEXT: store i32 [[TMP23]], i32* [[TMP20]], align 4, [[TBAA0]]
-; AVX512-NEXT: ret void
 ;
 %3 = load i32, i32* %1, align 4, !tbaa !2
 %4 = add i32 %3, 1
@@ -356,103 +303,50 @@
 ; SSE-NEXT: ret void
 ;
 ; AVX-LABEL: @gather_load_4(
-; AVX-NEXT: [[T5:%.*]] = getelementptr inbounds i32, i32* [[T0:%.*]], i64 1
 ; AVX-NEXT: [[T6:%.*]] = getelementptr inbounds i32, i32* [[T1:%.*]], i64 11
-; AVX-NEXT: [[T9:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 2
 ; AVX-NEXT: [[T10:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 4
-; AVX-NEXT: [[T13:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 3
 ; AVX-NEXT: [[T14:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 15
-; AVX-NEXT: [[T17:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 4
-; AVX-NEXT: [[T18:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 18
-; AVX-NEXT: [[T21:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 5
-; AVX-NEXT: [[T22:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 9
-; AVX-NEXT: [[T25:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 6
-; AVX-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
-; AVX-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
-; AVX-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
+; AVX-NEXT: [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* [[T1]], i32 0
+; AVX-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32*> [[TMP1]], <4 x i32*> undef, <4 x i32> zeroinitializer
+; AVX-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x i32*> [[TMP2]], <4 x i64>
 ; AVX-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
 ; AVX-NEXT: [[T7:%.*]] = load i32, i32* [[T6]], align 4, [[TBAA0]]
 ; AVX-NEXT: [[T11:%.*]] = load i32, i32* [[T10]], align 4, [[TBAA0]]
 ; AVX-NEXT: [[T15:%.*]] = load i32, i32* [[T14]], align 4, [[TBAA0]]
-; AVX-NEXT: [[T19:%.*]] = load i32, i32* [[T18]], align 4, [[TBAA0]]
-; AVX-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
-; AVX-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
-; AVX-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
-; AVX-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
-; AVX-NEXT: [[T8:%.*]] = add i32 [[T7]], 2
-; AVX-NEXT: [[T12:%.*]] = add i32 [[T11]], 3
-; AVX-NEXT: [[T16:%.*]] = add i32 [[T15]], 4
-; AVX-NEXT: [[T20:%.*]] = add i32 [[T19]], 1
-; AVX-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
-; AVX-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
-; AVX-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; AVX-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
-; AVX-NEXT: store i32 [[T8]], i32* [[T5]], align 4, [[TBAA0]]
-; AVX-NEXT: store i32 [[T12]], i32* [[T9]], align 4, [[TBAA0]]
-; AVX-NEXT: store i32 [[T16]], i32* [[T13]], align 4, [[TBAA0]]
-; AVX-NEXT: store i32 [[T20]], i32* [[T17]], align 4, [[TBAA0]]
-; AVX-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
-; AVX-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
-; AVX-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
+; AVX-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> undef, <8 x i32>
+; AVX-NEXT: [[TMP6:%.*]] = insertelement <8 x i32> poison, i32 [[T3]], i32 0
+; AVX-NEXT: [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 [[T7]], i32 1
+; AVX-NEXT: [[TMP8:%.*]] = insertelement <8 x i32> [[TMP7]], i32 [[T11]], i32 2
+; AVX-NEXT: [[TMP9:%.*]] = insertelement <8 x i32> [[TMP8]], i32 [[T15]], i32 3
+; AVX-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP9]], <8 x i32> [[TMP5]], <8 x i32>
+; AVX-NEXT: [[TMP11:%.*]] = add <8 x i32> [[TMP10]],
+; AVX-NEXT: [[TMP12:%.*]] = bitcast i32* [[T0:%.*]] to <8 x i32>*
+; AVX-NEXT: store <8 x i32> [[TMP11]], <8 x i32>* [[TMP12]], align 4, [[TBAA0]]
 ; AVX-NEXT: ret void
 ;
 ; AVX2-LABEL: @gather_load_4(
-; AVX2-NEXT: [[T5:%.*]] = getelementptr inbounds i32, i32* [[T0:%.*]], i64 1
-; AVX2-NEXT: [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* [[T1:%.*]], i32 0
+; AVX2-NEXT: [[T6:%.*]] = getelementptr inbounds i32, i32* [[T1:%.*]], i64 11
+; AVX2-NEXT: [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* [[T1]], i32 0
 ; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32*> [[TMP1]], <4 x i32*> undef, <4 x i32> zeroinitializer
-; AVX2-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x i32*> [[TMP2]], <4 x i64>
-; AVX2-NEXT: [[T21:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 5
-; AVX2-NEXT: [[T22:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 9
-; AVX2-NEXT: [[T25:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 6
-; AVX2-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
-; AVX2-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
-; AVX2-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
+; AVX2-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x i32*> [[TMP2]], <4 x i64>
+; AVX2-NEXT: [[TMP4:%.*]] = insertelement <2 x i32*> poison, i32* [[T1]], i32 0
+; AVX2-NEXT: [[TMP5:%.*]] = shufflevector <2 x i32*> [[TMP4]], <2 x i32*> undef, <2 x i32> zeroinitializer
+; AVX2-NEXT: [[TMP6:%.*]] = getelementptr i32, <2 x i32*> [[TMP5]], <2 x i64>
 ; AVX2-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
-; AVX2-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
-; AVX2-NEXT: [[TMP5:%.*]] = add <4 x i32> [[TMP4]],
-; AVX2-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
-; AVX2-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
-; AVX2-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; AVX2-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
-; AVX2-NEXT: [[TMP6:%.*]] = bitcast i32* [[T5]] to <4 x i32>*
-; AVX2-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4, [[TBAA0]]
-; AVX2-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
-; AVX2-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
-; AVX2-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[T7:%.*]] = load i32, i32* [[T6]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP7:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
+; AVX2-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> undef, <8 x i32>
+; AVX2-NEXT: [[TMP9:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> [[TMP6]], i32 4, <2 x i1> , <2 x i32> undef), [[TBAA0]]
+; AVX2-NEXT: [[TMP10:%.*]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> undef, <8 x i32>
+; AVX2-NEXT: [[TMP11:%.*]] = insertelement <8 x i32> poison, i32 [[T3]], i32 0
+; AVX2-NEXT: [[TMP12:%.*]] = insertelement <8 x i32> [[TMP11]], i32 [[T7]], i32 1
+; AVX2-NEXT: [[TMP13:%.*]] = shufflevector <8 x i32> [[TMP12]], <8 x i32> [[TMP8]], <8 x i32>
+; AVX2-NEXT: [[TMP14:%.*]] = shufflevector <8 x i32> [[TMP13]], <8 x i32> [[TMP10]], <8 x i32>
+; AVX2-NEXT: [[TMP15:%.*]] = add <8 x i32> [[TMP14]],
+; AVX2-NEXT: [[TMP16:%.*]] = bitcast i32* [[T0:%.*]] to <8 x i32>*
+; AVX2-NEXT: store <8 x i32> [[TMP15]], <8 x i32>* [[TMP16]], align 4, [[TBAA0]]
 ; AVX2-NEXT: ret void
-;
-; AVX512-LABEL: @gather_load_4(
-; AVX512-NEXT: [[T5:%.*]] = getelementptr inbounds i32, i32* [[T0:%.*]], i64 1
-; AVX512-NEXT: [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* [[T1:%.*]], i32 0
-; AVX512-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32*> [[TMP1]], <4 x i32*> undef, <4 x i32> zeroinitializer
-; AVX512-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x i32*> [[TMP2]], <4 x i64>
-; AVX512-NEXT: [[T21:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 5
-; AVX512-NEXT: [[T22:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 9
-; AVX512-NEXT: [[T25:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 6
-; AVX512-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
-; AVX512-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
-; AVX512-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
-; AVX512-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> , <4 x i32> undef), [[TBAA0]]
-; AVX512-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
-; AVX512-NEXT: [[TMP5:%.*]] = add <4 x i32> [[TMP4]],
-; AVX512-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
-; AVX512-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
-; AVX512-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; AVX512-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
-; AVX512-NEXT: [[TMP6:%.*]] = bitcast i32* [[T5]] to <4 x i32>*
-; AVX512-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4, [[TBAA0]]
-; AVX512-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
-; AVX512-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
-; AVX512-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
-; AVX512-NEXT: ret void
 ;
 %t5 = getelementptr inbounds i32, i32* %t0, i64 1
 %t6 = getelementptr inbounds i32, i32* %t1, i64 11
Index: llvm/test/Transforms/SLPVectorizer/X86/slp-throttle.ll
===================================================================
--- llvm/test/Transforms/SLPVectorizer/X86/slp-throttle.ll
+++ llvm/test/Transforms/SLPVectorizer/X86/slp-throttle.ll
@@ -5,18 +5,20 @@
 ; CHECK-LABEL: @rftbsub(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 2
-; CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[ARRAYIDX6]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = or i64 2, 1
-; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load double, double* [[ARRAYIDX12]], align 8
-; CHECK-NEXT: [[ADD16:%.*]] = fadd double [[TMP2]], undef
+; CHECK-NEXT: [[TMP0:%.*]] = or i64 2, 1
+; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast double* [[ARRAYIDX6]] to <2 x double>*
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[TMP2]], i32 1
+; CHECK-NEXT: [[ADD16:%.*]] = fadd double [[TMP3]], undef
 ; CHECK-NEXT: [[MUL18:%.*]] = fmul double undef, [[ADD16]]
 ; CHECK-NEXT: [[ADD19:%.*]] = fadd double undef, [[MUL18]]
 ; CHECK-NEXT: [[SUB22:%.*]] = fsub double undef, undef
-; CHECK-NEXT: [[SUB25:%.*]] = fsub double [[TMP0]], [[ADD19]]
-; CHECK-NEXT: store double [[SUB25]], double* [[ARRAYIDX6]], align 8
-; CHECK-NEXT: [[SUB29:%.*]] = fsub double [[TMP2]], [[SUB22]]
-; CHECK-NEXT: store double [[SUB29]], double* [[ARRAYIDX12]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> poison, double [[ADD19]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x double> [[TMP4]], double [[SUB22]], i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = fsub <2 x double> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast double* [[ARRAYIDX6]] to <2 x double>*
+; CHECK-NEXT: store <2 x double> [[TMP6]], <2 x double>* [[TMP7]], align 8
 ; CHECK-NEXT: unreachable
 ;
 entry:
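Note: the partially vectorized output checked in slp-throttle.ll above would typically be exercised by rerunning the test through opt. A minimal sketch of such a RUN line follows; it is illustrative only, since the test file's actual RUN line (target triple, -mcpu choice) is not reproduced in this diff, and the explicit -slp-throttling-budget=32 value is an assumption shown only to make the throttling budget visible on the command line:

; RUN: opt -slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -slp-throttling-budget=32 -S < %s | FileCheck %s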