diff --git a/llvm/include/llvm/ADT/SmallVector.h b/llvm/include/llvm/ADT/SmallVector.h
--- a/llvm/include/llvm/ADT/SmallVector.h
+++ b/llvm/include/llvm/ADT/SmallVector.h
@@ -32,8 +32,6 @@
 
 namespace llvm {
 
-template <typename IteratorT> class iterator_range;
-
 /// This is all the stuff common to all SmallVectors.
 ///
 /// The template parameter specifies the type which should be used to hold the
@@ -1205,12 +1203,6 @@
     this->append(S, E);
   }
 
-  template <typename RangeTy>
-  explicit SmallVector(const iterator_range<RangeTy> &R)
-      : SmallVectorImpl<T>(N) {
-    this->append(R.begin(), R.end());
-  }
-
   SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
     this->assign(IL);
   }
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -326,8 +326,7 @@
   /// This is a helper function which calls the two-argument getUserCost
   /// with \p Operands which are the current operands U has.
   InstructionCost getUserCost(const User *U, TargetCostKind CostKind) const {
-    SmallVector<const Value *, 4> Operands(U->operand_values());
-    return getUserCost(U, Operands, CostKind);
+    return getUserCost(U, to_vector<4>(U->operand_values()), CostKind);
   }
 
   /// If a branch or a select condition is skewed in one direction by more than
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -1049,12 +1049,13 @@
     TTI::OperandValueProperties Op2VP = TTI::OP_None;
     TTI::OperandValueKind Op1VK =
         TTI::getOperandInfo(U->getOperand(0), Op1VP);
-    TTI::OperandValueKind Op2VK = Opcode != Instruction::FNeg ?
-        TTI::getOperandInfo(U->getOperand(1), Op2VP) : TTI::OK_AnyValue;
-    SmallVector<const Value *, 2> Operands(U->operand_values());
-    return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind,
-                                             Op1VK, Op2VK,
-                                             Op1VP, Op2VP, Operands, I);
+    TTI::OperandValueKind Op2VK =
+        Opcode != Instruction::FNeg
+            ? TTI::getOperandInfo(U->getOperand(1), Op2VP)
+            : TTI::OK_AnyValue;
+    return TargetTTI->getArithmeticInstrCost(
+        Opcode, Ty, CostKind, Op1VK, Op2VK, Op1VP, Op2VP,
+        to_vector<2>(U->operand_values()), I);
   }
   case Instruction::IntToPtr:
   case Instruction::PtrToInt:
@@ -1237,8 +1238,8 @@
   }
 
   InstructionCost getInstructionLatency(const Instruction *I) {
-    SmallVector<const Value *, 4> Operands(I->operand_values());
-    if (getUserCost(I, Operands, TTI::TCK_Latency) == TTI::TCC_Free)
+    if (getUserCost(I, to_vector<4>(I->operand_values()), TTI::TCK_Latency) ==
+        TTI::TCC_Free)
       return 0;
 
     if (isa<LoadInst>(I))
diff --git a/llvm/include/llvm/IR/DebugInfoMetadata.h b/llvm/include/llvm/IR/DebugInfoMetadata.h
--- a/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -252,7 +252,7 @@
 
   TempGenericDINode cloneImpl() const {
     return getTemporary(getContext(), getTag(), getHeader(),
-                        SmallVector<Metadata *, 4>(dwarf_operands()));
+                        to_vector_of<Metadata *>(dwarf_operands()));
   }
 
 public:
diff --git a/llvm/include/llvm/IR/PredIteratorCache.h b/llvm/include/llvm/IR/PredIteratorCache.h
--- a/llvm/include/llvm/IR/PredIteratorCache.h
+++ b/llvm/include/llvm/IR/PredIteratorCache.h
@@ -44,7 +44,7 @@
     if (Entry)
      return Entry;
 
-    SmallVector<BasicBlock *, 32> PredCache(predecessors(BB));
+    SmallVector<BasicBlock *, 32> PredCache = to_vector<32>(predecessors(BB));
     PredCache.push_back(nullptr); // null terminator.
 
     BlockToPredCountMap[BB] = PredCache.size() - 1;
diff --git a/llvm/include/llvm/Support/CFGDiff.h b/llvm/include/llvm/Support/CFGDiff.h
--- a/llvm/include/llvm/Support/CFGDiff.h
+++ b/llvm/include/llvm/Support/CFGDiff.h
@@ -136,7 +136,7 @@
     using DirectedNodeT =
        std::conditional_t<InverseEdge, Inverse<NodePtr>, NodePtr>;
     auto R = children<DirectedNodeT>(N);
-    VectRet Res = VectRet(detail::reverse_if<!InverseEdge>(R));
+    VectRet Res = to_vector<8>(detail::reverse_if<!InverseEdge>(R));
 
     // Remove nullptr children for clang.
     llvm::erase_value(Res, nullptr);
diff --git a/llvm/include/llvm/Support/GenericDomTree.h b/llvm/include/llvm/Support/GenericDomTree.h
--- a/llvm/include/llvm/Support/GenericDomTree.h
+++ b/llvm/include/llvm/Support/GenericDomTree.h
@@ -838,7 +838,7 @@
           "NewBB should have a single successor!");
     NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
 
-    SmallVector<NodeRef, 4> PredBlocks(children<Inverse<N>>(NewBB));
+    auto PredBlocks = to_vector_of<NodeRef>(children<Inverse<N>>(NewBB));
 
     assert(!PredBlocks.empty() && "No predblocks?");
diff --git a/llvm/include/llvm/Support/GenericDomTreeConstruction.h b/llvm/include/llvm/Support/GenericDomTreeConstruction.h
--- a/llvm/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/llvm/include/llvm/Support/GenericDomTreeConstruction.h
@@ -116,7 +116,7 @@
     using DirectedNodeT =
        std::conditional_t<Inversed, Inverse<NodePtr>, NodePtr>;
     auto R = children<DirectedNodeT>(N);
-    SmallVector<NodePtr, 8> Res(detail::reverse_if<!Inversed>(R));
+    auto Res = to_vector<8>(detail::reverse_if<!Inversed>(R));
 
     // Remove nullptr children for clang.
     llvm::erase_value(Res, nullptr);
diff --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp
--- a/llvm/lib/CodeGen/Analysis.cpp
+++ b/llvm/lib/CodeGen/Analysis.cpp
@@ -709,8 +709,8 @@
   // The manipulations performed when we're looking through an insertvalue or
   // an extractvalue would happen at the front of the RetPath list, so since
   // we have to copy it anyway it's more efficient to create a reversed copy.
-  SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
-  SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));
+  auto TmpRetPath = to_vector<4>(llvm::reverse(RetPath));
+  auto TmpCallPath = to_vector<4>(llvm::reverse(CallPath));
 
   // Finally, we can check whether the value produced by the tail call at this
   // index is compatible with the value we return.
diff --git a/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.cpp
@@ -47,6 +47,7 @@
     InlinedAt = InlinedAt->getInlinedAt();
   }
 
-  SmallVector<InlineSite, 8> InlineStack(llvm::reverse(ReversedInlineStack));
+  SmallVector<InlineSite, 8> InlineStack =
+      to_vector<8>(llvm::reverse(ReversedInlineStack));
   Asm->OutStreamer->emitPseudoProbe(Guid, Index, Type, Attr, InlineStack);
 }
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -588,7 +588,7 @@
   // are removed.
   SmallSetVector<BasicBlock *, 8> WorkList;
   for (BasicBlock &BB : F) {
-    SmallVector<BasicBlock *, 2> Successors(successors(&BB));
+    auto Successors = to_vector<2>(successors(&BB));
     MadeChange |= ConstantFoldTerminator(&BB, true);
     if (!MadeChange) continue;
 
@@ -601,7 +601,7 @@
   MadeChange |= !WorkList.empty();
   while (!WorkList.empty()) {
     BasicBlock *BB = WorkList.pop_back_val();
-    SmallVector<BasicBlock *, 2> Successors(successors(BB));
+    auto Successors = to_vector<2>(successors(BB));
 
     DeleteDeadBlock(BB);
 
@@ -5500,7 +5500,7 @@
   if (MemoryInst->getParent() != GEP->getParent())
     return false;
 
-  SmallVector<Value *, 2> Ops(GEP->operands());
+  auto Ops = to_vector_of<Value *>(GEP->operands());
 
   bool RewriteGEP = false;
diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
--- a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -700,8 +700,8 @@
   // Set a sensible insertion point so that subsequent calls to
   // MIRBuilder.
   MIRBuilder.setMBB(*MBB);
-  SmallVector<MachineInstr *> WorkList(
-      make_pointer_range(reverse(MBB->instrs())));
+  SmallVector<MachineInstr *> WorkList =
+      to_vector(make_pointer_range(reverse(MBB->instrs())));
 
   while (!WorkList.empty()) {
     MachineInstr &MI = *WorkList.pop_back_val();
diff --git a/llvm/lib/CodeGen/IfConversion.cpp b/llvm/lib/CodeGen/IfConversion.cpp
--- a/llvm/lib/CodeGen/IfConversion.cpp
+++ b/llvm/lib/CodeGen/IfConversion.cpp
@@ -2262,7 +2262,8 @@
   if (ToBBI.IsBrAnalyzable)
     ToBBI.BB->normalizeSuccProbs();
 
-  SmallVector<MachineBasicBlock *, 4> FromSuccs(FromMBB.successors());
+  SmallVector<MachineBasicBlock *, 4> FromSuccs =
+      to_vector<4>(FromMBB.successors());
   MachineBasicBlock *NBB = getNextBlock(FromMBB);
   MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : nullptr;
   // The edge probability from ToBBI.BB to FromMBB, which is only needed when
diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
--- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
+++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -2457,7 +2457,8 @@
   bool Changed = false;
 
   // Order predecessors by RPOT order, for exploring them in that order.
-  SmallVector<MachineBasicBlock *, 8> BlockOrders(MBB.predecessors());
+  SmallVector<MachineBasicBlock *, 8> BlockOrders =
+      to_vector<8>(MBB.predecessors());
 
   auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
     return BBToOrder[A] < BBToOrder[B];
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -3197,8 +3197,8 @@
   MachineBasicBlock *Fallthrough = nullptr;
   BranchProbability DefaultBranchProb = BranchProbability::getZero();
   BlockFrequency BBDupThreshold(scaleThreshold(BB));
-  SmallVector<MachineBasicBlock *, 8> Preds(BB->predecessors());
-  SmallVector<MachineBasicBlock *, 8> Succs(BB->successors());
+  SmallVector<MachineBasicBlock *, 8> Preds = to_vector<8>(BB->predecessors());
+  SmallVector<MachineBasicBlock *, 8> Succs = to_vector<8>(BB->successors());
 
   // Sort for highest frequency.
   auto CmpSucc = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -859,7 +859,8 @@
   if (Succs != AllSuccessors.end())
     return Succs->second;
 
-  SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->successors());
+  SmallVector<MachineBasicBlock *, 4> AllSuccs =
+      to_vector<4>(MBB->successors());
 
   // Handle cases where sinking can happen but where the sink point isn't a
   // successor. For example:
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -106,7 +106,7 @@
     assert(CI->getCalledFunction() && "Cannot lower an indirect call!");
 
     IRBuilder<> Builder(CI->getParent(), CI->getIterator());
-    SmallVector<Value *, 8> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
     CallInst *NewCI = Builder.CreateCall(FCache, Args);
     NewCI->setName(CI->getName());
diff --git a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
--- a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
+++ b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
@@ -384,7 +384,8 @@
   if (LiveOut != MI)
     return;
 
-  SmallVector<MachineBasicBlock *, 4> ToVisit(MBB->successors());
+  SmallVector<MachineBasicBlock *, 4> ToVisit =
+      to_vector<4>(MBB->successors());
   SmallPtrSet<MachineBasicBlock*, 4>Visited;
   while (!ToVisit.empty()) {
     MachineBasicBlock *MBB = ToVisit.pop_back_val();
diff --git a/llvm/lib/CodeGen/ReplaceWithVeclib.cpp b/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
--- a/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
+++ b/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
@@ -69,7 +69,7 @@
   // Replace the call to the vector intrinsic with a call
   // to the corresponding function from the vector library.
   IRBuilder<> IRBuilder(&CI);
-  SmallVector<Value *> Args(CI.args());
+  auto Args = to_vector_of<Value *>(CI.args());
   // Preserve the operand bundles.
   SmallVector<OperandBundleDef, 1> OpBundles;
   CI.getOperandBundlesAsDefs(OpBundles);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2078,7 +2078,7 @@
   if (Node->isStrictFPOpcode()) {
     EVT RetVT = Node->getValueType(0);
-    SmallVector<SDValue, 4> Ops(drop_begin(Node->ops()));
+    auto Ops = to_vector_of<SDValue>(drop_begin(Node->ops()));
     TargetLowering::MakeLibCallOptions CallOptions;
     // FIXME: This doesn't support tail calls.
     std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RetVT,
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -171,7 +171,7 @@
   // Don't add glue to something that already has a glue value.
   if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return false;
 
-  SmallVector<EVT, 4> VTs(N->values());
+  SmallVector<EVT, 4> VTs = to_vector<4>(N->values());
   if (AddGlue)
     VTs.push_back(MVT::Glue);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6123,7 +6123,7 @@
     DILocalVariable *Variable = DI.getVariable();
     DIExpression *Expression = DI.getExpression();
     dropDanglingDebugInfo(Variable, Expression);
-    SmallVector<Value *, 4> Values(DI.getValues());
+    SmallVector<Value *, 4> Values = to_vector<4>(DI.getValues());
     if (Values.empty())
      return;
diff --git a/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
--- a/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -142,7 +142,7 @@
 /// instruction with those returned by the personality function.
 void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
                                          Value *SelVal) {
-  SmallVector<Value *, 8> UseWorkList(LPI->users());
+  auto UseWorkList = to_vector_of<Value *>(LPI->users());
   while (!UseWorkList.empty()) {
     Value *Val = UseWorkList.pop_back_val();
     auto *EVI = dyn_cast<ExtractValueInst>(Val);
diff --git a/llvm/lib/CodeGen/TailDuplicator.cpp b/llvm/lib/CodeGen/TailDuplicator.cpp
--- a/llvm/lib/CodeGen/TailDuplicator.cpp
+++ b/llvm/lib/CodeGen/TailDuplicator.cpp
@@ -720,7 +720,8 @@
                                    SmallVectorImpl<MachineInstr *> &Copies) {
   SmallPtrSet<MachineBasicBlock *, 8> Succs(TailBB->succ_begin(),
                                             TailBB->succ_end());
-  SmallVector<MachineBasicBlock *, 8> Preds(TailBB->predecessors());
+  SmallVector<MachineBasicBlock *, 8> Preds =
+      to_vector<8>(TailBB->predecessors());
   bool Changed = false;
   for (MachineBasicBlock *PredBB : Preds) {
     if (PredBB->hasEHPadSuccessor() || PredBB->mayHaveInlineAsmBr())
diff --git a/llvm/lib/CodeGen/WasmEHPrepare.cpp b/llvm/lib/CodeGen/WasmEHPrepare.cpp
--- a/llvm/lib/CodeGen/WasmEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WasmEHPrepare.cpp
@@ -181,7 +181,7 @@
      continue;
     Changed = true;
     auto *BB = ThrowI->getParent();
-    SmallVector<BasicBlock *, 4> Succs(successors(BB));
+    SmallVector<BasicBlock *, 4> Succs = to_vector<4>(successors(BB));
     auto &InstList = BB->getInstList();
     InstList.erase(std::next(BasicBlock::iterator(ThrowI)), InstList.end());
     IRB.SetInsertPoint(BB);
diff --git a/llvm/lib/CodeGen/WinEHPrepare.cpp b/llvm/lib/CodeGen/WinEHPrepare.cpp
--- a/llvm/lib/CodeGen/WinEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WinEHPrepare.cpp
@@ -572,7 +572,8 @@
       // the last need to set the following one as its TryParentState.
       const auto *CatchSwitch = cast<CatchSwitchInst>(Pad);
       int CatchState = -1, FollowerState = -1;
-      SmallVector<const BasicBlock *, 4> CatchBlocks(CatchSwitch->handlers());
+      SmallVector<const BasicBlock *, 4> CatchBlocks =
+          to_vector<4>(CatchSwitch->handlers());
       for (const BasicBlock *CatchBlock : llvm::reverse(CatchBlocks)) {
         // Create the entry for this catch with the appropriate handler
         // properties.
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1842,7 +1842,7 @@
     } else
      return false;
 
-    SmallVector<Value *, 4> Args(CI.args());
+    auto Args = to_vector_of<Value *>(CI.args());
     Args.pop_back();
     Args.pop_back();
     Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
@@ -2279,7 +2279,7 @@
                              { CI->getOperand(0), CI->getArgOperand(1) });
     Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
   } else if (IsX86 && Name.startswith("avx512.cmp.p")) {
-    SmallVector<Value *, 4> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
     Type *OpTy = Args[0]->getType();
     unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
     unsigned EltWidth = OpTy->getScalarSizeInBits();
@@ -3830,7 +3830,7 @@
               cast<FixedVectorType>(NewFn->getReturnType())->getNumElements() &&
           "Must have same number of elements");
 
-    SmallVector<Value *, 4> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
     Value *NewCI = Builder.CreateCall(NewFn, Args);
     Value *Res = PoisonValue::get(OldST);
     for (unsigned Idx = 0; Idx < OldST->getNumElements(); ++Idx) {
@@ -3854,7 +3854,7 @@
   case Intrinsic::arm_neon_vst2lane:
   case Intrinsic::arm_neon_vst3lane:
   case Intrinsic::arm_neon_vst4lane: {
-    SmallVector<Value *, 4> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
     NewCall = Builder.CreateCall(NewFn, Args);
     break;
   }
@@ -3965,7 +3965,7 @@
   case Intrinsic::x86_xop_vpermil2ps:
   case Intrinsic::x86_xop_vpermil2pd_256:
   case Intrinsic::x86_xop_vpermil2ps_256: {
-    SmallVector<Value *, 4> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
     VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
     VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
     Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
@@ -4026,7 +4026,7 @@
   case Intrinsic::x86_avx2_mpsadbw: {
     // Need to truncate the last argument from i32 to i8 -- this argument models
     // an inherently 8-bit immediate operand to these x86 instructions.
-    SmallVector<Value *, 4> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
 
     // Replace the last argument with a trunc.
     Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
@@ -4040,7 +4040,7 @@
   case Intrinsic::x86_avx512_mask_cmp_ps_128:
   case Intrinsic::x86_avx512_mask_cmp_ps_256:
   case Intrinsic::x86_avx512_mask_cmp_ps_512: {
-    SmallVector<Value *, 4> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
     unsigned NumElts =
        cast<FixedVectorType>(Args[0]->getType())->getNumElements();
     Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
@@ -4061,7 +4061,7 @@
 
   case Intrinsic::invariant_start:
   case Intrinsic::invariant_end: {
-    SmallVector<Value *, 4> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
     NewCall = Builder.CreateCall(NewFn, Args);
     break;
   }
@@ -4069,7 +4069,7 @@
   case Intrinsic::masked_store:
   case Intrinsic::masked_gather:
   case Intrinsic::masked_scatter: {
-    SmallVector<Value *, 4> Args(CI->args());
+    auto Args = to_vector_of<Value *>(CI->args());
     NewCall = Builder.CreateCall(NewFn, Args);
     NewCall->copyMetadata(*CI);
     break;
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -3428,8 +3428,7 @@
 }
 
 Instruction *ConstantExpr::getAsInstruction(Instruction *InsertBefore) const {
-  SmallVector<Value *, 4> ValueOperands(operands());
-  ArrayRef<Value *> Ops(ValueOperands);
+  SmallVector<Value *, 4> Ops = to_vector_of<Value *>(operands());
   switch (getOpcode()) {
   case Instruction::Trunc:
diff --git a/llvm/lib/IR/LLVMContextImpl.cpp b/llvm/lib/IR/LLVMContextImpl.cpp
--- a/llvm/lib/IR/LLVMContextImpl.cpp
+++ b/llvm/lib/IR/LLVMContextImpl.cpp
@@ -192,7 +192,7 @@
   unsigned Hash = hash_combine_range(N->op_begin() + Offset, N->op_end());
 #ifndef NDEBUG
   {
-    SmallVector<Metadata *, 8> MDs(drop_begin(N->operands(), Offset));
+    auto MDs = to_vector_of<Metadata *>(drop_begin(N->operands(), Offset));
     unsigned RawHash = calculateHash(MDs);
     assert(Hash == RawHash &&
           "Expected hash of MDOperand to equal hash of Metadata*");
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -89,7 +89,8 @@
   assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
-  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
+  SmallVector<const Value *> Index =
+      to_vector(llvm::drop_begin(operand_values()));
   return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                                DL, Offset, ExternalAnalysis);
 }
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -2731,7 +2731,7 @@
   // Check constraints that this basic block imposes on all of the PHI nodes in
   // it.
   if (isa<PHINode>(BB.front())) {
-    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
+    auto Preds = to_vector<8>(predecessors(&BB));
     SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
     llvm::sort(Preds);
     for (const PHINode &PN : BB.phis()) {
@@ -3724,7 +3724,7 @@
        "GEP base pointer is not a vector or a vector of pointers", &GEP);
   Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
 
-  SmallVector<Value *, 16> Idxs(GEP.indices());
+  auto Idxs = to_vector_of<Value *>(GEP.indices());
   Check(
      all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
      "GEP indexes must be integers", &GEP);
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -421,7 +421,7 @@
     auto VMI = VariableMap.find(Key);
     // Update the map if we found a new value/expression describing the
     // variable, or if the variable wasn't mapped already.
-    SmallVector<Value *, 4> Values(DVI->getValues());
+    SmallVector<Value *, 4> Values = to_vector<4>(DVI->getValues());
     if (VMI == VariableMap.end() || VMI->second.first != Values ||
         VMI->second.second != DVI->getExpression()) {
       VariableMap[Key] = {Values, DVI->getExpression()};
diff --git a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
--- a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
+++ b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
@@ -112,7 +112,7 @@
 
   Builder.SetInsertPoint(&MergeBlock->front());
   PHINode *Phi = Builder.CreatePHI(OrigInst->getType(), 0);
-  SmallVector<User *, 16> UsersToUpdate(OrigInst->users());
+  SmallVector<User *, 16> UsersToUpdate = to_vector<16>(OrigInst->users());
   for (User *U : UsersToUpdate)
     U->replaceUsesOfWith(OrigInst, Phi);
   Phi->addIncoming(OrigInst, OrigInst->getParent());
@@ -163,7 +163,7 @@
 
   // Save the users of the calling instruction. These uses will be changed to
   // use the bitcast after we create it.
-  SmallVector<User *, 16> UsersToUpdate(CB.users());
+  SmallVector<User *, 16> UsersToUpdate = to_vector<16>(CB.users());
 
   // Determine an appropriate location to create the bitcast for the return
   // value. The location depends on if we have a call or invoke instruction.
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -767,7 +767,7 @@
       NewBB = BasicBlock::Create(ExitBB->getContext(),
                                  ExitBB->getName() + ".split",
                                  ExitBB->getParent(), ExitBB);
-      SmallVector<BasicBlock *, 4> Preds(predecessors(ExitBB));
+      SmallVector<BasicBlock *, 4> Preds = to_vector<4>(predecessors(ExitBB));
      for (BasicBlock *PredBB : Preds)
        if (Blocks.count(PredBB))
          PredBB->getTerminator()->replaceUsesOfWith(ExitBB, NewBB);
diff --git a/llvm/lib/Transforms/Utils/Debugify.cpp b/llvm/lib/Transforms/Utils/Debugify.cpp
--- a/llvm/lib/Transforms/Utils/Debugify.cpp
+++ b/llvm/lib/Transforms/Utils/Debugify.cpp
@@ -268,7 +268,7 @@
   NamedMDNode *NMD = M.getModuleFlagsMetadata();
   if (!NMD)
     return Changed;
-  SmallVector<MDNode *, 4> Flags(NMD->operands());
+  SmallVector<MDNode *, 4> Flags = to_vector<4>(NMD->operands());
   NMD->clearOperands();
   for (MDNode *Flag : Flags) {
     auto *Key = cast<MDString>(Flag->getOperand(1));
diff --git a/llvm/lib/Transforms/Utils/GuardUtils.cpp b/llvm/lib/Transforms/Utils/GuardUtils.cpp
--- a/llvm/lib/Transforms/Utils/GuardUtils.cpp
+++ b/llvm/lib/Transforms/Utils/GuardUtils.cpp
@@ -30,7 +30,7 @@
 void llvm::makeGuardControlFlowExplicit(Function *DeoptIntrinsic,
                                        CallInst *Guard, bool UseWC) {
   OperandBundleDef DeoptOB(*Guard->getOperandBundle(LLVMContext::OB_deopt));
-  SmallVector<Value *, 4> Args(drop_begin(Guard->args()));
+  auto Args = to_vector_of<Value *>(drop_begin(Guard->args()));
 
   auto *CheckBB = Guard->getParent();
   auto *DeoptBlockTerm =
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -2157,7 +2157,7 @@
       Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
                                  Attrs.getRetAttrs(), ArgAttrs);
       // Add VarArgs to existing parameters.
-      SmallVector<Value *, 6> Params(CI->args());
+      auto Params = to_vector_of<Value *>(CI->args());
       Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
       CallInst *NewCI = CallInst::Create(
           CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
@@ -2391,7 +2391,7 @@
     auto *CurBB = RI->getParent();
     RI->eraseFromParent();
 
-    SmallVector<Value *, 4> CallArgs(DeoptCall->args());
+    auto CallArgs = to_vector_of<Value *>(DeoptCall->args());
     SmallVector<OperandBundleDef, 1> OpBundles;
     DeoptCall->getOperandBundlesAsDefs(OpBundles);
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1126,7 +1126,7 @@
   // If there is more than one pred of succ, and there are PHI nodes in
   // the successor, then we need to add incoming edges for the PHI nodes
   //
-  const PredBlockVector BBPreds(predecessors(BB));
+  const PredBlockVector BBPreds = to_vector(predecessors(BB));
 
   // Loop over all of the PHI nodes in the successor of BB.
   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
@@ -2159,7 +2159,7 @@
 }
 
 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
-  SmallVector<Value *, 8> Args(II->args());
+  auto Args = to_vector_of<Value *>(II->args());
   SmallVector<OperandBundleDef, 1> OpBundles;
   II->getOperandBundlesAsDefs(OpBundles);
   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
@@ -2218,7 +2218,7 @@
   BB->getInstList().pop_back();
 
   // Create the new invoke instruction.
-  SmallVector<Value *, 8> InvokeArgs(CI->args());
+  auto InvokeArgs = to_vector_of<Value *>(CI->args());
   SmallVector<OperandBundleDef, 1> OpBundles;
 
   CI->getOperandBundlesAsDefs(OpBundles);
diff --git a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
--- a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
@@ -616,7 +616,7 @@
    // one predecessor. Note that Exit could be an exit block for multiple
    // nested loops, causing both of the edges to now be critical and need to
    // be split.
-    SmallVector<BasicBlock *, 4> ExitPreds(predecessors(Exit));
+    SmallVector<BasicBlock *, 4> ExitPreds = to_vector<4>(predecessors(Exit));
     bool SplitLatchEdge = false;
     for (BasicBlock *ExitPred : ExitPreds) {
      // We only need to split loop exit edges.
diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
--- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -164,7 +164,8 @@
     Value *BrLoopExit = B.CreateICmpULT(
         BECount, ConstantInt::get(BECount->getType(), Count - 1));
     // Split the exit to maintain loop canonicalization guarantees
-    SmallVector<BasicBlock *, 4> Preds(predecessors(OriginalLoopLatchExit));
+    SmallVector<BasicBlock *, 4> Preds =
+        to_vector<4>(predecessors(OriginalLoopLatchExit));
     SplitBlockPredecessors(OriginalLoopLatchExit, Preds, ".unr-lcssa", DT, LI,
                            nullptr, PreserveLCSSA);
     // Add the branch to the exit block (around the unrolled loop)
@@ -287,7 +288,7 @@
   Value *BrLoopExit = B.CreateIsNotNull(ModVal, "lcmp.mod");
   assert(Exit && "Loop must have a single exit block only");
   // Split the epilogue exit to maintain loop canonicalization guarantees
-  SmallVector<BasicBlock *, 4> Preds(predecessors(Exit));
+  SmallVector<BasicBlock *, 4> Preds = to_vector<4>(predecessors(Exit));
   SplitBlockPredecessors(Exit, Preds, ".epilog-lcssa", DT, LI, nullptr,
                          PreserveLCSSA);
   // Add the branch to the exit block (around the unrolling loop)
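
The snippet below is a minimal, illustrative sketch of the caller-side pattern applied throughout the patch; `collectPreds` and `collectCallArgs` are hypothetical helpers, not part of the change. With the SmallVector iterator_range constructor removed, a range is materialized either with to_vector<N>, which deduces the element type from the range, or with to_vector_of<T>, which converts each element to an explicit element type (for example Use to Value *).

#include "llvm/ADT/STLExtras.h"   // llvm::to_vector, llvm::to_vector_of
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CFG.h"          // llvm::predecessors
#include "llvm/IR/InstrTypes.h"   // llvm::CallBase

using namespace llvm;

// Was: SmallVector<BasicBlock *, 8> Preds(predecessors(BB));
// The element type (BasicBlock *) is deduced from the range; 8 is the inline
// capacity of the returned SmallVector.
static SmallVector<BasicBlock *, 8> collectPreds(BasicBlock *BB) {
  return to_vector<8>(predecessors(BB));
}

// Was: SmallVector<Value *, 8> Args(CB.args());
// CB.args() iterates over Use, so the element type is forced to Value * with
// to_vector_of.
static SmallVector<Value *> collectCallArgs(const CallBase &CB) {
  return to_vector_of<Value *>(CB.args());
}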