Index: lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -114,6 +114,10 @@
 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                    cl::desc("Attempt to vectorize horizontal reductions"));
 
+static cl::opt<bool>
+SLPThrottling("slp-throttling", cl::init(false), cl::Hidden,
+              cl::desc("Enable partial vectorization of the tree with throttling"));
+
 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
     "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
     cl::desc(
@@ -517,7 +521,15 @@
 
   /// \returns the cost incurred by unwanted spills and fills, caused by
   /// holding live values over call sites.
-  int getSpillCost();
+  int getSpillCost(const SmallPtrSetImpl<Value *> &ScalarsToVec);
+
+  /// \returns the cost of extracting the vectorized elements.
+  int getExtractCost(const SmallPtrSetImpl<Value *> &ScalarsToVec);
+
+  /// \returns the cost of gathering canceled elements to be used
+  /// by vectorized operations during throttling.
+  int getInsertCost(const SmallPtrSetImpl<Value *> &VecToScalars,
+                    unsigned Height);
 
   /// \returns the vectorization cost of the subtree that starts at \p VL.
   /// A negative number means that this is profitable.
@@ -542,6 +554,8 @@
     ScalarToTreeEntry.clear();
     MustGather.clear();
     ExternalUses.clear();
+    InternalTreeUses.clear();
+    RemovedOperations.clear();
     NumOpsWantToKeepOrder.clear();
     NumOpsWantToKeepOriginalOrder = 0;
     for (auto &Iter : BlocksSchedules) {
@@ -549,6 +563,7 @@
       BS->clear();
     }
     MinBWs.clear();
+    ScalarizeAt = 0;
   }
 
   unsigned getTreeSize() const { return VectorizableTree.size(); }
@@ -601,6 +616,9 @@
   /// vectorizable. We do not vectorize such trees.
   bool isTreeTinyAndNotFullyVectorizable();
 
+  /// Cut the tree to make it partially vectorizable.
+  void cutTree();
+
   OptimizationRemarkEmitter *getORE() { return ORE; }
 
 private:
@@ -743,6 +761,9 @@
   /// Maps a specific scalar to its tree entry.
   SmallDenseMap<Value *, int> ScalarToTreeEntry;
 
+  /// Tree entries that should not be vectorized due to throttling.
+  SmallVector<int, 4> RemovedOperations;
+
   /// A list of scalars that we found that we need to keep as scalars.
   ValueSet MustGather;
 
@@ -762,6 +783,9 @@
   };
   using UserList = SmallVector<ExternalUser, 16>;
 
+  /// \returns the cost of extracting one vectorized element.
+  int getExtractOperationCost(ExternalUser &EU);
+
   /// Checks if two instructions may access the same memory.
   ///
   /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
@@ -813,6 +837,9 @@
   /// after vectorization.
   UserList ExternalUses;
 
+  /// Maps each scalar proposed for vectorization to its users inside the tree.
+  SmallDenseMap<Value *, UserList> InternalTreeUses;
+
   /// Values used only by @llvm.assume calls.
   SmallPtrSet<const Value *, 32> EphValues;
 
@@ -822,6 +849,9 @@
   /// A list of blocks that we are going to CSE.
   SetVector<BasicBlock *> CSEBlocks;
 
+  /// Tree entries above this index are scalarized during throttling.
+  unsigned ScalarizeAt = 0;
+
   /// Contains all scheduling relevant data for an instruction.
   /// A ScheduleData either represents a single instruction or a member of an
   /// instruction bundle (= a group of instructions which is combined into a
@@ -1171,6 +1201,9 @@
   /// Attaches the BlockScheduling structures to basic blocks.
   MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
 
+  /// Remove canceled operations from the list proposed for scheduling.
+  void removeFromScheduling(BlockScheduling *BS);
+
   /// Performs the "real" scheduling. Done before vectorization is actually
   /// performed in a basic block.
   void scheduleBlock(BlockScheduling *BS);
@@ -1376,6 +1409,7 @@
           LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                             << ".\n");
           assert(!UseEntry->NeedToGather && "Bad state");
+          InternalTreeUses[U].push_back(ExternalUser(Scalar, U, FoundLane));
           continue;
         }
       }
@@ -2419,6 +2453,60 @@
   }
 }
 
+void BoUpSLP::cutTree() {
+
+  if (ScalarizeAt == (VectorizableTree.size() - 1) || ScalarizeAt == 0)
+    return;
+
+  // Cancel the unprofitable elements.
+  SmallPtrSet<Value *, 4> Removed;
+  for (unsigned I = ScalarizeAt + 1, E = VectorizableTree.size(); I < E; I++) {
+    TreeEntry *Entry = &VectorizableTree[I];
+    if (Entry->NeedToGather)
+      continue;
+    Entry->NeedToGather = true;
+    for (Value *V : Entry->Scalars) {
+      LLVM_DEBUG(dbgs() << "SLP: Remove scalar " << *V
+                        << " out of proposed to vectorize.\n");
+      ScalarToTreeEntry.erase(V);
+      Removed.insert(V);
+      RemovedOperations.push_back(I);
+      MustGather.insert(V);
+      ExternalUses.erase(
+          std::remove_if(ExternalUses.begin(), ExternalUses.end(),
+                         [&](ExternalUser &EU) { return EU.Scalar == V; }),
+          ExternalUses.end());
+    }
+  }
+
+  // A canceled operation might still use values produced by operations that
+  // remain vectorized; to account for that, populate the ExternalUses list
+  // with the canceled elements, since those values must be extracted.
+  for (unsigned I = 0, E = ScalarizeAt; I <= E; I++) {
+    TreeEntry *Entry = &VectorizableTree[I];
+    if (Entry->NeedToGather)
+      continue;
+    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
+      Value *Scalar = Entry->Scalars[Lane];
+      for (User *U : Scalar->users()) {
+        LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
+        Instruction *UserInst = dyn_cast<Instruction>(U);
+        if (!UserInst)
+          continue;
+        if (Removed.count(U) == 0)
+          continue;
+        // Ignore users in the user ignore list.
+        if (is_contained(UserIgnoreList, UserInst))
+          continue;
+        LLVM_DEBUG(dbgs() << "SLP: Need to extract canceled operation: "
+                          << *U << " from lane " << Lane
+                          << " from " << *Scalar << ".\n");
+        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
+      }
+    }
+  }
+}
+
 bool BoUpSLP::isFullyVectorizableTinyTree() {
   LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
                     << VectorizableTree.size() << " is fully vectorizable .\n");
@@ -2463,7 +2551,7 @@
   return true;
 }
 
-int BoUpSLP::getSpillCost() {
+int BoUpSLP::getSpillCost(const SmallPtrSetImpl<Value *> &ScalarsToVec) {
   // Walk from the bottom of the tree to the top, tracking which values are
   // live. When we see a call instruction that is not part of our tree,
   // query TTI to see if there is a cost to keeping values live over it
@@ -2487,7 +2575,7 @@
     // Update LiveValues.
     LiveValues.erase(PrevInst);
     for (auto &J : PrevInst->operands()) {
-      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
+      if (isa<Instruction>(&*J) && ScalarsToVec.count(&*J) > 0)
         LiveValues.insert(cast<Instruction>(&*J));
     }
 
@@ -2528,15 +2616,88 @@
   return Cost;
 }
 
+int BoUpSLP::getExtractOperationCost(ExternalUser &EU) {
+  unsigned BundleWidth = VectorizableTree.front().Scalars.size();
+
+  // Uses by ephemeral values are free (because the ephemeral value will be
+  // removed prior to code generation, and so the extraction will be
+  // removed as well).
+  if (EphValues.count(EU.User))
+    return 0;
+
+  // If we plan to rewrite the tree in a smaller type, we will need to sign
+  // extend the extracted value back to the original type. Here, we account
+  // for the extract and the added cost of the sign extend if needed.
+  auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
+  auto *ScalarRoot = VectorizableTree[0].Scalars[0];
+
+  if (MinBWs.count(ScalarRoot)) {
+    auto *MinTy = IntegerType::get(F->getContext(),
+                                   MinBWs[ScalarRoot].first);
+    auto Extend =
+        MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
+    VecTy = VectorType::get(MinTy, BundleWidth);
+    return (TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
+                                          VecTy, EU.Lane));
+  }
+  return TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
+}
+
+int BoUpSLP::getExtractCost(const SmallPtrSetImpl<Value *> &ScalarsToVec) {
+  int ExtractCost = 0;
+  SmallPtrSet<Value *, 16> ExtractCostCalculated;
+  for (ExternalUser &EU : ExternalUses) {
+    // We only add extract cost once for the same scalar.
+    if (!ExtractCostCalculated.insert(EU.Scalar).second)
+      continue;
+
+    // Skip scalars that are not vectorized at this tree height.
+    if (!ScalarsToVec.count(EU.Scalar)) {
+      // A canceled element may still use this vectorized value, so account
+      // for the cost of extracting it.
+      if (InternalTreeUses.find(EU.Scalar) != InternalTreeUses.end())
+        for (ExternalUser &IU : InternalTreeUses[EU.Scalar])
+          ExtractCost += getExtractOperationCost(IU);
+      continue;
+    }
+
+    ExtractCost += getExtractOperationCost(EU);
+  }
+  return ExtractCost;
+}
+
+int BoUpSLP::getInsertCost(const SmallPtrSetImpl<Value *> &VecToScalars,
+                           unsigned Height) {
+  int InsertCost = 0;
+  for (int I = Height; I >= 0; I--) {
+    TreeEntry *Entry = &VectorizableTree[I];
+    if (Entry->NeedToGather)
+      continue;
+    for (Value *V : Entry->Scalars) {
+      auto *Inst = cast<Instruction>(V);
+      for (Use &U : Inst->operands()) {
+        Value *Op = U.get();
+        if (VecToScalars.count(Op))
+          InsertCost += getGatherCost(Op);
+      }
+    }
+  }
+  return InsertCost;
+}
+
 int BoUpSLP::getTreeCost() {
-  int Cost = 0;
+  SmallDenseMap<int, SmallVector<int, 4>> UseTreeIndices;
+  SmallPtrSet<Value *, 16> ScalarsToVec;
+  SmallPtrSet<Value *, 16> VecToScalars;
+  SmallVector<int, 8> EntriesCosts;
+  unsigned FlowInstAt = UINT_MAX;
+  int CostSum = 0;
   LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
                     << VectorizableTree.size() << ".\n");
-
-  unsigned BundleWidth = VectorizableTree[0].Scalars.size();
-
   for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
     TreeEntry &TE = VectorizableTree[I];
+    EntriesCosts.push_back(0);
 
     // We create duplicate tree entries for gather sequences that have multiple
     // uses. However, we should not compute the cost of duplicate sequences.
@@ -2557,47 +2718,80 @@
         }))
       continue;
 
-    int C = getEntryCost(&TE);
-    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
+    if (TE.NeedToGather) {
+      for (int UseTreeIdx : TE.UserTreeIndices)
+        UseTreeIndices[UseTreeIdx].push_back(I);
+    } else {
+      for (Value *V : TE.Scalars)
+        ScalarsToVec.insert(V);
+      unsigned Opcode = cast<Instruction>(TE.Scalars[0])->getOpcode();
+      // For partial vectorization it is not always profitable to vectorize
+      // control-flow operations such as PHIs and selects.
+      if (FlowInstAt == UINT_MAX &&
+          (Opcode == Instruction::PHI || Opcode == Instruction::Select))
+        FlowInstAt = I;
+    }
+
+    EntriesCosts[I] = getEntryCost(&TE);
+    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << EntriesCosts[I]
                       << " for bundle that starts with " << *TE.Scalars[0]
                       << ".\n");
-    Cost += C;
+
+    CostSum += EntriesCosts[I];
   }
 
-  SmallPtrSet<Value *, 16> ExtractCostCalculated;
   int ExtractCost = 0;
-  for (ExternalUser &EU : ExternalUses) {
-    // We only add extract cost once for the same scalar.
-    if (!ExtractCostCalculated.insert(EU.Scalar).second)
-      continue;
-
-    // Uses by ephemeral values are free (because the ephemeral value will be
-    // removed prior to code generation, and so the extraction will be
-    // removed as well).
-    if (EphValues.count(EU.User))
-      continue;
-
-    // If we plan to rewrite the tree in a smaller type, we will need to sign
-    // extend the extracted value back to the original type. Here, we account
-    // for the extract and the added cost of the sign extend if needed.
-    auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
-    auto *ScalarRoot = VectorizableTree[0].Scalars[0];
-    if (MinBWs.count(ScalarRoot)) {
-      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
-      auto Extend =
-          MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
-      VecTy = VectorType::get(MinTy, BundleWidth);
-      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
-                                                   VecTy, EU.Lane);
-    } else {
-      ExtractCost +=
-          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
+  int SpillCost = 0;
+  int Cost = CostSum;
+
+  ExtractCost = getExtractCost(ScalarsToVec);
+  Cost += ExtractCost;
+  SpillCost = getSpillCost(ScalarsToVec);
+  Cost += SpillCost;
+
+  if (SLPThrottling && ScalarizeAt == 0 && Cost >= -SLPCostThreshold) {
+    int ScalarizeAtCost = 0;
+    int ScalarizeAtExtractCost = 0;
+    int ScalarizeAtSpillCost = 0;
+    for (unsigned I = VectorizableTree.size() - 1; I > 0; --I) {
+      TreeEntry *Entry = &VectorizableTree[I];
+
+      if (Entry->NeedToGather)
+        continue;
+      int GatherCost = 0;
+      auto It = UseTreeIndices.find(I);
+      if (It != UseTreeIndices.end())
+        for (int Gather : It->second)
+          GatherCost += EntriesCosts[Gather];
+
+      ScalarizeAtCost = CostSum;
+
+      ScalarizeAtCost += getInsertCost(VecToScalars, I);
+      ScalarizeAtExtractCost = getExtractCost(ScalarsToVec);
+      ScalarizeAtCost += ScalarizeAtExtractCost;
+      ScalarizeAtSpillCost = getSpillCost(ScalarsToVec);
+      ScalarizeAtCost += ScalarizeAtSpillCost;
+      if (ScalarizeAtCost < -SLPCostThreshold && FlowInstAt > I) {
+        ScalarizeAt = I;
+        break;
+      }
+      CostSum -= EntriesCosts[I] + GatherCost;
+      for (Value *V : Entry->Scalars) {
+        ScalarsToVec.erase(V);
+        VecToScalars.insert(V);
+      }
+    }
+    if (ScalarizeAt != 0 && ScalarizeAt != (VectorizableTree.size() - 1)) {
+      LLVM_DEBUG(dbgs() << "SLP: Reduced the tree cost by "
+                        << Cost - ScalarizeAtCost
+                        << " to make it partially vectorizable.\n");
+      Cost = ScalarizeAtCost;
+      ExtractCost = ScalarizeAtExtractCost;
+      SpillCost = ScalarizeAtSpillCost;
+      cutTree();
     }
   }
 
-  int SpillCost = getSpillCost();
-  Cost += SpillCost + ExtractCost;
-
   std::string Str;
   {
     raw_string_ostream OS(Str);
@@ -3580,7 +3774,12 @@
 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
   // All blocks must be scheduled before any instructions are inserted.
   for (auto &BSIter : BlocksSchedules) {
-    scheduleBlock(BSIter.second.get());
+    BlockScheduling *BS = BSIter.second.get();
+    // Remove the ScheduleData of all nodes for which the vectorization
+    // decision was canceled.
+    if (RemovedOperations.size())
+      removeFromScheduling(BS);
+    scheduleBlock(BS);
   }
 
   Builder.SetInsertPoint(&F->getEntryBlock().front());
@@ -3709,13 +3908,16 @@
     Type *Ty = Scalar->getType();
     if (!Ty->isVoidTy()) {
 #ifndef NDEBUG
-      for (User *U : Scalar->users()) {
-        LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
-
-        // It is legal to replace users in the ignorelist by undef.
-        assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
-               "Replacing out-of-tree value with undef");
-      }
+      // If the tree was throttled, some scalars legitimately have
+      // out-of-tree users, so we cannot validate every user here.
+      if (!RemovedOperations.size())
+        for (User *U : Scalar->users()) {
+          LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
+
+          // It is legal to replace users in the ignorelist by undef.
+          assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
+                 "Replacing out-of-tree value with undef");
+        }
 #endif
       Value *Undef = UndefValue::get(Ty);
       Scalar->replaceAllUsesWith(Undef);
@@ -4192,6 +4394,32 @@
   ReadyInsts.clear();
 }
 
+void BoUpSLP::removeFromScheduling(BlockScheduling *BS) {
+  bool Removed = false;
+  for (int I : RemovedOperations) {
+    TreeEntry *Entry = &VectorizableTree[I];
+    ScheduleData *SD = BS->getScheduleData(Entry->Scalars[0]);
+    if (SD && SD->isPartOfBundle()) {
+      if (!Removed) {
+        Removed = true;
+        BS->resetSchedule();
+      }
+      BS->cancelScheduling(Entry->Scalars, SD->OpValue);
+    }
+  }
+  if (!Removed)
+    return;
+  BS->resetSchedule();
+  BS->initialFillReadyList(BS->ReadyInsts);
+  for (Instruction *I = BS->ScheduleStart; I != BS->ScheduleEnd;
+       I = I->getNextNode()) {
+    if (BS->ScheduleDataMap.find(I) == BS->ScheduleDataMap.end())
+      continue;
+    BS->doForAllOpcodes(I,
+                        [&](ScheduleData *SD) { SD->clearDependencies(); });
+  }
+}
+
 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
   if (!BS->ScheduleStart)
     return;
@@ -4688,7 +4916,17 @@
   const unsigned ChainLen = Chain.size();
   LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
                     << "\n");
-  const unsigned Sz = R.getVectorElementSize(Chain[0]);
+  // Pick a store whose value operand is still present; stores that were
+  // already vectorized have had it cleared.
+  Value *FirstStore = nullptr;
+  for (Value *V : Chain) {
+    if (auto *I = dyn_cast<StoreInst>(V))
+      if (I->getOperand(0))
+        FirstStore = V;
+  }
+  if (!FirstStore)
+    return false;
+  const unsigned Sz = R.getVectorElementSize(FirstStore);
   const unsigned VF = VecRegSize / Sz;
 
   if (!isPowerOf2_32(Sz) || VF < 2)
@@ -4709,6 +4947,13 @@
                       << "\n");
     ArrayRef<Value *> Operands = Chain.slice(i, VF);
 
+    // Skip the slice if any of its stores was already vectorized.
+    if (std::any_of(Operands.begin(), Operands.end(),
+                    [](Value *V) {
+                      return (!(cast<StoreInst>(V))->getValueOperand());
+                    }))
+      continue;
+
     R.buildTree(Operands);
     if (R.isTreeTinyAndNotFullyVectorizable())
       continue;
Index: test/Transforms/SLPVectorizer/X86/slp-throttle.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/slp-throttle.ll
+++ test/Transforms/SLPVectorizer/X86/slp-throttle.ll
@@ -1,22 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 < %s | FileCheck %s
+; RUN: opt -slp-vectorizer -S -slp-throttling -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 < %s | FileCheck %s
 
 define dso_local void @rftbsub(double* %a) local_unnamed_addr #0 {
 ; CHECK-LABEL: @rftbsub(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 2
-; CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[ARRAYIDX6]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = or i64 2, 1
-; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load double, double* [[ARRAYIDX12]], align 8
-; CHECK-NEXT:    [[ADD16:%.*]] = fadd double [[TMP2]], undef
+; CHECK-NEXT:    [[TMP0:%.*]] = or i64 2, 1
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[ARRAYIDX6]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP2]], i32 1
+; CHECK-NEXT:    [[ADD16:%.*]] = fadd double [[TMP3]], undef
 ; CHECK-NEXT:    [[MUL18:%.*]] = fmul double undef, [[ADD16]]
 ; CHECK-NEXT:    [[ADD19:%.*]] = fadd double undef, [[MUL18]]
 ; CHECK-NEXT:    [[SUB22:%.*]] = fsub double undef, undef
-; CHECK-NEXT:    [[SUB25:%.*]] = fsub double [[TMP0]], [[ADD19]]
-; CHECK-NEXT:    store double [[SUB25]], double* [[ARRAYIDX6]], align 8
-; CHECK-NEXT:    [[SUB29:%.*]] = fsub double [[TMP2]], [[SUB22]]
-; CHECK-NEXT:    store double [[SUB29]], double* [[ARRAYIDX12]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> undef, double [[ADD19]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> [[TMP4]], double [[SUB22]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = fsub <2 x double> [[TMP2]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast double* [[ARRAYIDX6]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP6]], <2 x double>* [[TMP7]], align 8
 ; CHECK-NEXT:    unreachable
 ;
 entry:
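For anyone trying the patch locally: the throttling path is gated behind the hidden -slp-throttling flag added above, so the new test's RUN line is the easiest way to exercise it. A minimal standalone invocation, assuming opt and FileCheck from this build tree are on PATH, might look like:

  opt -slp-vectorizer -slp-throttling -S \
      -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 \
      test/Transforms/SLPVectorizer/X86/slp-throttle.ll \
    | FileCheck test/Transforms/SLPVectorizer/X86/slp-throttle.ll

Without -slp-throttling the tree in this test is rejected as unprofitable and the scalar code is kept, which is what the pre-patch CHECK lines reflected.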